2024-11-20 23:35:30,705 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-20 23:35:30,720 main DEBUG Took 0.012472 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-20 23:35:30,720 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-20 23:35:30,721 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-20 23:35:30,722 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-20 23:35:30,723 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 23:35:30,731 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-20 23:35:30,746 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,748 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 23:35:30,749 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,749 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 23:35:30,750 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,750 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 23:35:30,751 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,751 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 23:35:30,752 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,752 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 23:35:30,753 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,753 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 23:35:30,754 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,754 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-20 23:35:30,755 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,755 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 23:35:30,755 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,756 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 23:35:30,756 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,757 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 23:35:30,757 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,757 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 23:35:30,758 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,758 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 23:35:30,758 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,759 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-20 23:35:30,760 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 23:35:30,761 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-20 23:35:30,763 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-20 23:35:30,763 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-20 23:35:30,765 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-20 23:35:30,765 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-20 23:35:30,774 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-20 23:35:30,776 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-20 23:35:30,778 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-20 23:35:30,778 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-20 23:35:30,778 main DEBUG createAppenders(={Console}) 2024-11-20 23:35:30,779 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-20 23:35:30,779 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-20 23:35:30,780 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-20 23:35:30,780 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-20 23:35:30,780 main DEBUG OutputStream closed 2024-11-20 23:35:30,781 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-20 23:35:30,781 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-20 23:35:30,781 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-20 23:35:30,856 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-20 23:35:30,858 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-20 23:35:30,859 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-20 23:35:30,860 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-20 23:35:30,861 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-20 23:35:30,862 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-20 23:35:30,862 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-20 23:35:30,862 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-20 23:35:30,863 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-20 23:35:30,863 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-20 23:35:30,863 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-20 23:35:30,864 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-20 23:35:30,864 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-20 23:35:30,864 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-20 23:35:30,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-20 23:35:30,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-20 23:35:30,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-20 23:35:30,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-20 23:35:30,869 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-20 23:35:30,870 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-20 23:35:30,870 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-20 23:35:30,871 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-20T23:35:31,174 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406 2024-11-20 23:35:31,178 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-20 23:35:31,179 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-20T23:35:31,190 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-20T23:35:31,236 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=347, ProcessCount=11, AvailableMemoryMB=1561 2024-11-20T23:35:31,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T23:35:31,263 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/cluster_6b5435d9-f9b6-0860-04a5-363a5d7b51f7, deleteOnExit=true 2024-11-20T23:35:31,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T23:35:31,265 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/test.cache.data in system properties and HBase conf 2024-11-20T23:35:31,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T23:35:31,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/hadoop.log.dir in system properties and HBase conf 2024-11-20T23:35:31,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T23:35:31,269 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T23:35:31,269 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T23:35:31,396 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-20T23:35:31,521 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T23:35:31,527 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:35:31,527 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:35:31,528 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T23:35:31,529 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:35:31,529 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T23:35:31,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T23:35:31,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:35:31,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:35:31,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T23:35:31,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/nfs.dump.dir in system properties and HBase conf 2024-11-20T23:35:31,535 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/java.io.tmpdir in system properties and HBase conf 2024-11-20T23:35:31,536 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:35:31,536 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T23:35:31,537 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T23:35:32,088 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:35:32,695 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-20T23:35:32,776 INFO [Time-limited test {}] log.Log(170): Logging initialized @2975ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-20T23:35:32,844 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:35:32,908 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:35:32,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:35:32,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:35:32,931 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T23:35:32,944 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:35:32,947 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:35:32,949 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:35:33,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/java.io.tmpdir/jetty-localhost-37319-hadoop-hdfs-3_4_1-tests_jar-_-any-2310718299959446175/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T23:35:33,157 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:37319} 2024-11-20T23:35:33,157 INFO [Time-limited test {}] server.Server(415): Started @3357ms 2024-11-20T23:35:33,199 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:35:33,946 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:35:33,955 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:35:33,957 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:35:33,957 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:35:33,958 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:35:33,959 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:35:33,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:35:34,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/java.io.tmpdir/jetty-localhost-45649-hadoop-hdfs-3_4_1-tests_jar-_-any-5950640589437251637/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:35:34,100 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:45649} 2024-11-20T23:35:34,101 INFO [Time-limited test {}] server.Server(415): Started @4301ms 2024-11-20T23:35:34,156 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:35:34,270 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:35:34,278 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:35:34,279 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:35:34,279 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:35:34,279 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:35:34,281 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:35:34,281 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:35:34,416 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/java.io.tmpdir/jetty-localhost-39149-hadoop-hdfs-3_4_1-tests_jar-_-any-6633404330029680596/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:35:34,417 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:39149} 2024-11-20T23:35:34,417 INFO [Time-limited test {}] server.Server(415): Started @4617ms 2024-11-20T23:35:34,419 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-20T23:35:35,550 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/cluster_6b5435d9-f9b6-0860-04a5-363a5d7b51f7/data/data1/current/BP-1444344256-172.17.0.2-1732145732167/current, will proceed with Du for space computation calculation, 2024-11-20T23:35:35,550 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/cluster_6b5435d9-f9b6-0860-04a5-363a5d7b51f7/data/data2/current/BP-1444344256-172.17.0.2-1732145732167/current, will proceed with Du for space computation calculation, 2024-11-20T23:35:35,550 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/cluster_6b5435d9-f9b6-0860-04a5-363a5d7b51f7/data/data3/current/BP-1444344256-172.17.0.2-1732145732167/current, will proceed with Du for space computation calculation, 2024-11-20T23:35:35,550 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/cluster_6b5435d9-f9b6-0860-04a5-363a5d7b51f7/data/data4/current/BP-1444344256-172.17.0.2-1732145732167/current, will proceed with Du for space computation calculation, 2024-11-20T23:35:35,588 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T23:35:35,588 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T23:35:35,641 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9bcd36481c584042 with lease ID 0xc3869a0b1026ea42: Processing first storage report for DS-4260de69-9d7e-48fa-89dc-8f5425639a25 from datanode DatanodeRegistration(127.0.0.1:39693, datanodeUuid=34685de6-fcbe-4837-98b1-5cfe9cf69bba, infoPort=35935, infoSecurePort=0, ipcPort=44107, storageInfo=lv=-57;cid=testClusterID;nsid=1011293241;c=1732145732167) 2024-11-20T23:35:35,643 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9bcd36481c584042 with lease ID 0xc3869a0b1026ea42: from storage DS-4260de69-9d7e-48fa-89dc-8f5425639a25 node DatanodeRegistration(127.0.0.1:39693, datanodeUuid=34685de6-fcbe-4837-98b1-5cfe9cf69bba, infoPort=35935, infoSecurePort=0, ipcPort=44107, storageInfo=lv=-57;cid=testClusterID;nsid=1011293241;c=1732145732167), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T23:35:35,643 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c2db666602c709 with lease ID 0xc3869a0b1026ea43: Processing first storage report for DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1 from datanode DatanodeRegistration(127.0.0.1:38923, datanodeUuid=a5b8cb3b-20d4-4a60-b10a-5f9b38cfd3d8, infoPort=34397, infoSecurePort=0, ipcPort=40127, storageInfo=lv=-57;cid=testClusterID;nsid=1011293241;c=1732145732167) 2024-11-20T23:35:35,644 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c2db666602c709 with lease ID 0xc3869a0b1026ea43: from storage DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1 node DatanodeRegistration(127.0.0.1:38923, datanodeUuid=a5b8cb3b-20d4-4a60-b10a-5f9b38cfd3d8, infoPort=34397, infoSecurePort=0, ipcPort=40127, storageInfo=lv=-57;cid=testClusterID;nsid=1011293241;c=1732145732167), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:35:35,644 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9bcd36481c584042 with lease ID 0xc3869a0b1026ea42: Processing first storage report for DS-48ad483b-a835-4687-be32-4638719a2f74 from datanode DatanodeRegistration(127.0.0.1:39693, datanodeUuid=34685de6-fcbe-4837-98b1-5cfe9cf69bba, infoPort=35935, infoSecurePort=0, ipcPort=44107, storageInfo=lv=-57;cid=testClusterID;nsid=1011293241;c=1732145732167) 2024-11-20T23:35:35,645 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9bcd36481c584042 with lease ID 0xc3869a0b1026ea42: from storage DS-48ad483b-a835-4687-be32-4638719a2f74 node DatanodeRegistration(127.0.0.1:39693, datanodeUuid=34685de6-fcbe-4837-98b1-5cfe9cf69bba, infoPort=35935, infoSecurePort=0, ipcPort=44107, storageInfo=lv=-57;cid=testClusterID;nsid=1011293241;c=1732145732167), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:35:35,645 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c2db666602c709 with lease ID 0xc3869a0b1026ea43: Processing first storage report for DS-6f05b94f-435d-4137-8592-64ad213dad31 from datanode DatanodeRegistration(127.0.0.1:38923, datanodeUuid=a5b8cb3b-20d4-4a60-b10a-5f9b38cfd3d8, infoPort=34397, infoSecurePort=0, ipcPort=40127, storageInfo=lv=-57;cid=testClusterID;nsid=1011293241;c=1732145732167) 2024-11-20T23:35:35,645 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x2c2db666602c709 with lease ID 0xc3869a0b1026ea43: from storage DS-6f05b94f-435d-4137-8592-64ad213dad31 node DatanodeRegistration(127.0.0.1:38923, datanodeUuid=a5b8cb3b-20d4-4a60-b10a-5f9b38cfd3d8, infoPort=34397, infoSecurePort=0, ipcPort=40127, storageInfo=lv=-57;cid=testClusterID;nsid=1011293241;c=1732145732167), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T23:35:35,727 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406 2024-11-20T23:35:35,831 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/cluster_6b5435d9-f9b6-0860-04a5-363a5d7b51f7/zookeeper_0, clientPort=55252, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/cluster_6b5435d9-f9b6-0860-04a5-363a5d7b51f7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/cluster_6b5435d9-f9b6-0860-04a5-363a5d7b51f7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T23:35:35,839 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55252 2024-11-20T23:35:35,854 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:35:35,858 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:35:36,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:35:36,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:35:36,487 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346 with version=8 2024-11-20T23:35:36,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/hbase-staging 2024-11-20T23:35:36,568 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-20T23:35:36,817 INFO [Time-limited test {}] client.ConnectionUtils(128): master/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:35:36,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:35:36,827 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:35:36,831 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:35:36,831 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:35:36,831 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:35:36,951 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T23:35:37,003 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-20T23:35:37,011 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-20T23:35:37,015 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:35:37,036 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 98017 (auto-detected) 2024-11-20T23:35:37,037 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-20T23:35:37,053 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44935 2024-11-20T23:35:37,072 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44935 connecting to ZooKeeper ensemble=127.0.0.1:55252 2024-11-20T23:35:37,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:449350x0, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:35:37,197 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44935-0x1015a9a642d0000 connected 2024-11-20T23:35:37,304 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:35:37,307 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:35:37,318 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:35:37,322 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346, hbase.cluster.distributed=false 2024-11-20T23:35:37,344 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:35:37,349 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44935 2024-11-20T23:35:37,349 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44935 2024-11-20T23:35:37,350 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44935 2024-11-20T23:35:37,350 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44935 2024-11-20T23:35:37,351 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44935 2024-11-20T23:35:37,463 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:35:37,465 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:35:37,465 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:35:37,466 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:35:37,466 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:35:37,466 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:35:37,470 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T23:35:37,473 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:35:37,474 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33793 2024-11-20T23:35:37,477 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33793 connecting to ZooKeeper ensemble=127.0.0.1:55252 2024-11-20T23:35:37,479 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:35:37,486 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:35:37,553 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:337930x0, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:35:37,555 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33793-0x1015a9a642d0001 connected 2024-11-20T23:35:37,555 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:35:37,565 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T23:35:37,576 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T23:35:37,580 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T23:35:37,588 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:35:37,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33793 2024-11-20T23:35:37,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33793 2024-11-20T23:35:37,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33793 2024-11-20T23:35:37,594 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33793 2024-11-20T23:35:37,594 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33793 2024-11-20T23:35:37,609 DEBUG [M:0;412a5e44fd2e:44935 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;412a5e44fd2e:44935 2024-11-20T23:35:37,610 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/412a5e44fd2e,44935,1732145736648 2024-11-20T23:35:37,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:35:37,644 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:35:37,647 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/412a5e44fd2e,44935,1732145736648 2024-11-20T23:35:37,674 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T23:35:37,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:35:37,675 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-20T23:35:37,675 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T23:35:37,676 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/412a5e44fd2e,44935,1732145736648 from backup master directory 2024-11-20T23:35:37,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/412a5e44fd2e,44935,1732145736648 2024-11-20T23:35:37,690 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:35:37,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:35:37,691 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T23:35:37,692 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=412a5e44fd2e,44935,1732145736648 2024-11-20T23:35:37,695 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-20T23:35:37,696 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-20T23:35:37,753 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/hbase.id] with ID: f0b29ba6-0d83-4b88-8628-cfcf762e7bbe 2024-11-20T23:35:37,753 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/.tmp/hbase.id 2024-11-20T23:35:37,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:35:37,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:35:37,767 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/.tmp/hbase.id]:[hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/hbase.id] 2024-11-20T23:35:37,816 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:35:37,821 INFO 
[master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T23:35:37,839 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-20T23:35:37,853 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:35:37,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:35:37,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:35:37,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:35:37,893 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T23:35:37,895 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T23:35:37,900 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:35:37,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:35:37,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:35:37,945 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store 2024-11-20T23:35:37,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:35:37,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:35:37,973 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-20T23:35:37,977 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:35:37,978 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:35:37,979 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:35:37,979 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:35:37,981 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T23:35:37,981 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T23:35:37,982 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:35:37,984 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732145737978Disabling compacts and flushes for region at 1732145737978Disabling writes for close at 1732145737981 (+3 ms)Writing region close event to WAL at 1732145737981Closed at 1732145737982 (+1 ms) 2024-11-20T23:35:37,987 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/.initializing 2024-11-20T23:35:37,987 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/WALs/412a5e44fd2e,44935,1732145736648 2024-11-20T23:35:38,029 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C44935%2C1732145736648, suffix=, logDir=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/WALs/412a5e44fd2e,44935,1732145736648, archiveDir=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/oldWALs, maxLogs=10 2024-11-20T23:35:38,040 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C44935%2C1732145736648.1732145738035 2024-11-20T23:35:38,069 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/WALs/412a5e44fd2e,44935,1732145736648/412a5e44fd2e%2C44935%2C1732145736648.1732145738035 2024-11-20T23:35:38,082 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34397:34397),(127.0.0.1/127.0.0.1:35935:35935)] 2024-11-20T23:35:38,083 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:35:38,084 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:35:38,087 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:35:38,089 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:35:38,127 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:35:38,150 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T23:35:38,153 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:38,155 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:35:38,156 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:35:38,159 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T23:35:38,159 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:38,160 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:35:38,161 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:35:38,163 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T23:35:38,163 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:38,164 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:35:38,164 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:35:38,167 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T23:35:38,167 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:38,168 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:35:38,168 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:35:38,172 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:35:38,173 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:35:38,179 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:35:38,180 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:35:38,184 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T23:35:38,189 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:35:38,194 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:35:38,195 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710098, jitterRate=-0.09706461429595947}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T23:35:38,203 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732145738105Initializing all the Stores at 1732145738107 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145738108 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145738108Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145738109 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145738109Cleaning up temporary data from old regions at 1732145738180 (+71 ms)Region opened successfully at 1732145738202 (+22 ms) 2024-11-20T23:35:38,204 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T23:35:38,236 DEBUG 
[master/412a5e44fd2e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59f66ed9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:35:38,265 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T23:35:38,274 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T23:35:38,274 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T23:35:38,277 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T23:35:38,278 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-20T23:35:38,282 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-20T23:35:38,283 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T23:35:38,307 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T23:35:38,316 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T23:35:38,501 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T23:35:38,504 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T23:35:38,506 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T23:35:38,671 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T23:35:38,674 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T23:35:38,679 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T23:35:38,716 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T23:35:38,720 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T23:35:38,732 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T23:35:38,752 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T23:35:38,763 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T23:35:38,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:35:38,774 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:35:38,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:35:38,775 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:35:38,779 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=412a5e44fd2e,44935,1732145736648, sessionid=0x1015a9a642d0000, setting cluster-up flag (Was=false) 2024-11-20T23:35:38,811 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:35:38,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:35:38,843 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T23:35:38,846 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,44935,1732145736648 2024-11-20T23:35:38,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:35:38,869 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:35:38,900 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing 
all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T23:35:38,903 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,44935,1732145736648 2024-11-20T23:35:38,908 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T23:35:38,976 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T23:35:38,985 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T23:35:38,992 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T23:35:38,997 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 412a5e44fd2e,44935,1732145736648 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T23:35:38,999 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(746): ClusterId : f0b29ba6-0d83-4b88-8628-cfcf762e7bbe 2024-11-20T23:35:39,002 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T23:35:39,004 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:35:39,004 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:35:39,005 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:35:39,005 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:35:39,005 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/412a5e44fd2e:0, corePoolSize=10, maxPoolSize=10 2024-11-20T23:35:39,005 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_SNAPSHOT_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:35:39,005 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:35:39,005 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:35:39,010 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:35:39,011 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732145769011 2024-11-20T23:35:39,011 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T23:35:39,013 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T23:35:39,013 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T23:35:39,014 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T23:35:39,014 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T23:35:39,019 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:39,019 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T23:35:39,019 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T23:35:39,020 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T23:35:39,019 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T23:35:39,020 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T23:35:39,021 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,025 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T23:35:39,026 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T23:35:39,026 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T23:35:39,028 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T23:35:39,029 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T23:35:39,029 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T23:35:39,029 DEBUG [RS:0;412a5e44fd2e:33793 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ee1c593, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:35:39,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:35:39,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:35:39,031 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145739030,5,FailOnTimeoutGroup] 2024-11-20T23:35:39,031 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145739031,5,FailOnTimeoutGroup] 2024-11-20T23:35:39,032 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,032 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
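The hbase:meta descriptor logged above carries coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|'. For reference, a minimal sketch of attaching that coprocessor class at that priority to a descriptor of one's own is below; my reading is that 536870911 corresponds to Coprocessor.PRIORITY_SYSTEM, and the builder names are the public HBase client API rather than anything this log invokes directly.

```java
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorAttachSketch {
  public static TableDescriptor withMultiRowMutation(TableName table) {
    return TableDescriptorBuilder.newBuilder(table)
        // Same class and priority as the coprocessor$1 attribute in the log;
        // 536870911 appears to equal Coprocessor.PRIORITY_SYSTEM.
        .setCoprocessor(CoprocessorDescriptorBuilder
            .newBuilder("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .setPriority(Coprocessor.PRIORITY_SYSTEM)
            .build())
        .build();
  }
}
```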
2024-11-20T23:35:39,033 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T23:35:39,033 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346 2024-11-20T23:35:39,034 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,034 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
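The CompactionConfiguration dumps repeated above for each store (minCompactSize 128 MB, min/max files 3/10, ratio 1.2, off-peak ratio 5.0, throttle point 2684354560) reflect configuration values; the throttle point equals 2 * maxFilesToCompact * the 128 MB flush size. A hedged sketch of the corresponding site keys is below; the key names are standard HBase compaction settings as I understand them, not values read back from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Values mirroring the CompactionConfiguration dump above.
    conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    // Throttle point 2684354560 = 2 * maxFilesToCompact * 128 MB flush size.
    conf.setLong("hbase.regionserver.thread.compaction.throttle",
        2L * 10 * 128 * 1024 * 1024);
    return conf;
  }
}
```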
2024-11-20T23:35:39,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:35:39,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:35:39,045 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:35:39,046 DEBUG [RS:0;412a5e44fd2e:33793 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;412a5e44fd2e:33793 2024-11-20T23:35:39,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:35:39,049 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T23:35:39,050 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T23:35:39,050 DEBUG [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-20T23:35:39,050 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:35:39,050 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:39,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:35:39,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:35:39,052 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(2659): reportForDuty to master=412a5e44fd2e,44935,1732145736648 with port=33793, startcode=1732145737422 2024-11-20T23:35:39,054 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 
0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:35:39,054 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:39,055 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:35:39,055 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:35:39,058 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:35:39,058 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:39,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:35:39,060 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:35:39,062 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:35:39,062 
DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:39,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:35:39,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:35:39,065 DEBUG [RS:0;412a5e44fd2e:33793 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T23:35:39,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740 2024-11-20T23:35:39,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740 2024-11-20T23:35:39,070 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:35:39,070 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:35:39,071 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T23:35:39,074 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:35:39,078 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:35:39,079 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851668, jitterRate=0.08295208215713501}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:35:39,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732145739045Initializing all the Stores at 1732145739047 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145739047Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145739047Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145739047Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145739047Cleaning up temporary data from old regions at 1732145739070 (+23 ms)Region opened successfully at 1732145739083 (+13 ms) 2024-11-20T23:35:39,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T23:35:39,084 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:35:39,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:35:39,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T23:35:39,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:35:39,086 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T23:35:39,086 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732145739084Disabling compacts and flushes for region at 1732145739084Disabling writes for close at 1732145739084Writing region close event to WAL at 1732145739085 (+1 ms)Closed at 1732145739086 (+1 ms) 2024-11-20T23:35:39,089 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:35:39,089 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T23:35:39,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T23:35:39,104 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:35:39,107 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T23:35:39,126 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46647, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T23:35:39,133 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44935 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 412a5e44fd2e,33793,1732145737422 2024-11-20T23:35:39,135 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44935 {}] master.ServerManager(517): Registering regionserver=412a5e44fd2e,33793,1732145737422 2024-11-20T23:35:39,149 DEBUG [RS:0;412a5e44fd2e:33793 {}] 
regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346 2024-11-20T23:35:39,150 DEBUG [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42045 2024-11-20T23:35:39,150 DEBUG [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T23:35:39,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:35:39,165 DEBUG [RS:0;412a5e44fd2e:33793 {}] zookeeper.ZKUtil(111): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/412a5e44fd2e,33793,1732145737422 2024-11-20T23:35:39,165 WARN [RS:0;412a5e44fd2e:33793 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T23:35:39,165 INFO [RS:0;412a5e44fd2e:33793 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:35:39,165 DEBUG [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422 2024-11-20T23:35:39,167 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [412a5e44fd2e,33793,1732145737422] 2024-11-20T23:35:39,195 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T23:35:39,211 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T23:35:39,215 INFO [RS:0;412a5e44fd2e:33793 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T23:35:39,215 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,216 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T23:35:39,222 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T23:35:39,223 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
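Earlier entries show FlushLargeStoresPolicy falling back to region.getMemStoreFlushHeapSize divided by the number of families because no per-family lower bound is set on the table: for master:store that is 128 MB / 4 families = 32 MB (flushSizeLowerBound=33554432), and for hbase:meta it works out to 16 MB (flushSizeLowerBound=16777216). A sketch of pinning that bound explicitly on a table descriptor follows; the property name is taken from the log message itself, and the builder calls are assumed standard HBase client API.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  public static TableDescriptor withExplicitLowerBound(TableName table) {
    // Same arithmetic the log reports for master:store.
    long memstoreFlushSize = 128L * 1024 * 1024;    // flushSize=134217728
    int families = 4;                               // info, proc, rs, state
    long lowerBound = memstoreFlushSize / families; // 33554432 (32 MB)

    return TableDescriptorBuilder.newBuilder(table)
        .setMemStoreFlushSize(memstoreFlushSize)
        // Property named in the FlushLargeStoresPolicy log line above.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(lowerBound))
        .build();
  }
}
```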
2024-11-20T23:35:39,224 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:35:39,224 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:35:39,224 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:35:39,224 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:35:39,225 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:35:39,225 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:35:39,225 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:35:39,225 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:35:39,225 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:35:39,225 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:35:39,226 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:35:39,226 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:35:39,226 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:35:39,226 DEBUG [RS:0;412a5e44fd2e:33793 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:35:39,227 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,227 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,228 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,228 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
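The executor-service lines above (RS_OPEN_REGION, RS_CLOSE_REGION, RS_LOG_REPLAY_OPS and so on) each report a corePoolSize and maxPoolSize. Purely as a JDK analogue of one such pool, not HBase's own ExecutorService implementation, the construction would look roughly like this:

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorAnalogueSketch {
  public static ThreadPoolExecutor openRegionPool() {
    // Mirrors "RS_OPEN_REGION ... corePoolSize=1, maxPoolSize=1" above;
    // plain java.util.concurrent, used here only to illustrate the sizing.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    pool.allowCoreThreadTimeOut(true); // let an idle pool shrink to zero threads
    return pool;
  }
}
```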
2024-11-20T23:35:39,228 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,228 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,33793,1732145737422-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:35:39,244 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T23:35:39,246 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,33793,1732145737422-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,246 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,247 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.Replication(171): 412a5e44fd2e,33793,1732145737422 started 2024-11-20T23:35:39,258 WARN [412a5e44fd2e:44935 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-20T23:35:39,264 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,264 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(1482): Serving as 412a5e44fd2e,33793,1732145737422, RpcServer on 412a5e44fd2e/172.17.0.2:33793, sessionid=0x1015a9a642d0001 2024-11-20T23:35:39,265 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T23:35:39,265 DEBUG [RS:0;412a5e44fd2e:33793 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 412a5e44fd2e,33793,1732145737422 2024-11-20T23:35:39,265 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,33793,1732145737422' 2024-11-20T23:35:39,266 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T23:35:39,267 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T23:35:39,268 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T23:35:39,268 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T23:35:39,268 DEBUG [RS:0;412a5e44fd2e:33793 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 412a5e44fd2e,33793,1732145737422 2024-11-20T23:35:39,268 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,33793,1732145737422' 2024-11-20T23:35:39,268 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T23:35:39,269 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T23:35:39,269 DEBUG [RS:0;412a5e44fd2e:33793 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T23:35:39,269 INFO [RS:0;412a5e44fd2e:33793 {}] 
quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T23:35:39,270 INFO [RS:0;412a5e44fd2e:33793 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T23:35:39,384 INFO [RS:0;412a5e44fd2e:33793 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C33793%2C1732145737422, suffix=, logDir=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422, archiveDir=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/oldWALs, maxLogs=32 2024-11-20T23:35:39,387 INFO [RS:0;412a5e44fd2e:33793 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C33793%2C1732145737422.1732145739386 2024-11-20T23:35:39,396 INFO [RS:0;412a5e44fd2e:33793 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145739386 2024-11-20T23:35:39,397 DEBUG [RS:0;412a5e44fd2e:33793 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35935:35935),(127.0.0.1/127.0.0.1:34397:34397)] 2024-11-20T23:35:39,510 DEBUG [412a5e44fd2e:44935 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T23:35:39,522 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=412a5e44fd2e,33793,1732145737422 2024-11-20T23:35:39,528 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,33793,1732145737422, state=OPENING 2024-11-20T23:35:39,542 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T23:35:39,553 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:35:39,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:35:39,554 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:35:39,554 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:35:39,555 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:35:39,557 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,33793,1732145737422}] 2024-11-20T23:35:39,736 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T23:35:39,740 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:51341, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T23:35:39,752 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T23:35:39,753 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:35:39,757 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C33793%2C1732145737422.meta, suffix=.meta, logDir=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422, archiveDir=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/oldWALs, maxLogs=32 2024-11-20T23:35:39,760 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C33793%2C1732145737422.meta.1732145739760.meta 2024-11-20T23:35:39,768 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.meta.1732145739760.meta 2024-11-20T23:35:39,770 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35935:35935),(127.0.0.1/127.0.0.1:34397:34397)] 2024-11-20T23:35:39,772 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:35:39,774 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T23:35:39,777 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T23:35:39,781 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
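The entries above show the hbase:meta region being opened with the MultiRowMutationEndpoint coprocessor loaded from its table descriptor. For a user table, attaching a coprocessor through the descriptor would look roughly like the sketch below; this is a minimal illustration of the public TableDescriptorBuilder API, the table name "example" is a placeholder, and it is not code taken from this test.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorDescriptorSketch {
  public static TableDescriptor build() throws IOException {
    // Hypothetical table; the coprocessor class name matches the one loaded for hbase:meta above.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
  }
}
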
2024-11-20T23:35:39,785 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T23:35:39,786 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:35:39,786 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T23:35:39,786 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T23:35:39,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:35:39,791 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:35:39,791 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:39,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:35:39,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:35:39,794 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:35:39,794 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:39,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:35:39,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:35:39,796 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:35:39,796 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:39,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:35:39,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:35:39,799 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:35:39,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:39,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
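The StoreOpener entries above dump the effective CompactionConfiguration for each column family of hbase:meta (minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, and so on). Those values come from configuration; a minimal sketch of overriding a few of them in a test Configuration is below. The key names are the standard HBase compaction settings to the best of my knowledge; treat them as assumptions and check hbase-default.xml for the authoritative names and defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key names, corresponding to minFilesToCompact / maxFilesToCompact / ratio in the log above.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    return conf;
  }
}
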
2024-11-20T23:35:39,800 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:35:39,802 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740 2024-11-20T23:35:39,804 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740 2024-11-20T23:35:39,807 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:35:39,807 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:35:39,808 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T23:35:39,811 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:35:39,813 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=839548, jitterRate=0.06754077970981598}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:35:39,813 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T23:35:39,815 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732145739786Writing region info on filesystem at 1732145739787 (+1 ms)Initializing all the Stores at 1732145739789 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145739789Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145739789Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145739789Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145739789Cleaning up temporary data from old regions at 1732145739807 (+18 ms)Running coprocessor post-open hooks at 1732145739813 (+6 ms)Region opened successfully at 1732145739815 (+2 ms) 2024-11-20T23:35:39,822 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732145739728 2024-11-20T23:35:39,833 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T23:35:39,834 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T23:35:39,835 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=412a5e44fd2e,33793,1732145737422 2024-11-20T23:35:39,837 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,33793,1732145737422, state=OPEN 2024-11-20T23:35:39,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:35:39,880 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:35:39,880 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:35:39,880 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:35:39,881 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=412a5e44fd2e,33793,1732145737422 2024-11-20T23:35:39,891 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T23:35:39,891 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,33793,1732145737422 in 324 msec 2024-11-20T23:35:39,899 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T23:35:39,899 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 798 msec 2024-11-20T23:35:39,900 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:35:39,901 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T23:35:39,918 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:35:39,919 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,33793,1732145737422, seqNum=-1] 2024-11-20T23:35:39,939 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:35:39,941 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54997, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:35:39,962 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0230 sec 2024-11-20T23:35:39,962 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732145739962, completionTime=-1 2024-11-20T23:35:39,965 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T23:35:39,965 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T23:35:39,993 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T23:35:39,993 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732145799993 2024-11-20T23:35:39,994 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732145859993 2024-11-20T23:35:39,994 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 28 msec 2024-11-20T23:35:39,996 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,44935,1732145736648-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,996 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,44935,1732145736648-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,997 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,44935,1732145736648-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,998 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-412a5e44fd2e:44935, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T23:35:39,998 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:39,999 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T23:35:40,005 DEBUG [master/412a5e44fd2e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T23:35:40,026 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.334sec 2024-11-20T23:35:40,027 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T23:35:40,028 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T23:35:40,029 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T23:35:40,029 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T23:35:40,029 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T23:35:40,030 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,44935,1732145736648-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:35:40,030 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,44935,1732145736648-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T23:35:40,038 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T23:35:40,039 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T23:35:40,040 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,44935,1732145736648-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
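At this point the master reports that initialization is complete, and the entries that follow show a client connection being established (cluster id fetch, registry stubs, meta region location lookup). A minimal client-side sketch that would produce a similar sequence is below; it assumes the standard synchronous client API and an hbase-site.xml on the classpath pointing at this minicluster, and is not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Opening a connection triggers the ConnectionRegistry / cluster-id exchange seen in the log.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}
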
2024-11-20T23:35:40,108 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66cb686a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:35:40,110 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-20T23:35:40,110 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-20T23:35:40,113 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 412a5e44fd2e,44935,-1 for getting cluster id 2024-11-20T23:35:40,116 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T23:35:40,125 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f0b29ba6-0d83-4b88-8628-cfcf762e7bbe' 2024-11-20T23:35:40,128 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T23:35:40,128 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f0b29ba6-0d83-4b88-8628-cfcf762e7bbe" 2024-11-20T23:35:40,130 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27bbb91a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:35:40,130 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [412a5e44fd2e,44935,-1] 2024-11-20T23:35:40,133 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T23:35:40,136 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:35:40,137 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49256, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T23:35:40,140 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fb6022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:35:40,141 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:35:40,148 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,33793,1732145737422, seqNum=-1] 2024-11-20T23:35:40,148 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:35:40,151 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37524, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:35:40,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=412a5e44fd2e,44935,1732145736648 2024-11-20T23:35:40,170 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:35:40,177 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T23:35:40,180 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T23:35:40,185 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 412a5e44fd2e,44935,1732145736648 2024-11-20T23:35:40,187 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@66649c1a 2024-11-20T23:35:40,188 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T23:35:40,190 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49266, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T23:35:40,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T23:35:40,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
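The two warnings above come from TableDescriptorChecker: the test deliberately sets MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) far below their sanity thresholds so that splits and flushes happen quickly. A hedged sketch of a descriptor that would trip the same checks is below; the table and family names are placeholders, and the threshold values the master compares against are not shown here.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TinyRegionDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // Values mirror the warnings above: tiny region size and flush size for fast log-rolling tests.
        .setMaxFileSize(786432L)        // MAX_FILESIZE, normally governed by "hbase.hregion.max.filesize"
        .setMemStoreFlushSize(8192L)    // MEMSTORE_FLUSHSIZE, normally "hbase.hregion.memstore.flush.size"
        .build();
  }
}
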
2024-11-20T23:35:40,196 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T23:35:40,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-20T23:35:40,206 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T23:35:40,209 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-20T23:35:40,209 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:40,212 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T23:35:40,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T23:35:40,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741835_1011 (size=389) 2024-11-20T23:35:40,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741835_1011 (size=389) 2024-11-20T23:35:40,263 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 016bb07afb95e5389c469e78d274caf0, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346 2024-11-20T23:35:40,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741836_1012 (size=72) 2024-11-20T23:35:40,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741836_1012 (size=72) 2024-11-20T23:35:40,275 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:35:40,275 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 016bb07afb95e5389c469e78d274caf0, disabling compactions & flushes 2024-11-20T23:35:40,275 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 2024-11-20T23:35:40,275 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 2024-11-20T23:35:40,275 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. after waiting 0 ms 2024-11-20T23:35:40,275 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 2024-11-20T23:35:40,275 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 2024-11-20T23:35:40,276 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 016bb07afb95e5389c469e78d274caf0: Waiting for close lock at 1732145740275Disabling compacts and flushes for region at 1732145740275Disabling writes for close at 1732145740275Writing region close event to WAL at 1732145740275Closed at 1732145740275 2024-11-20T23:35:40,279 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T23:35:40,283 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732145740279"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732145740279"}]},"ts":"1732145740279"} 2024-11-20T23:35:40,289 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
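The procedure above creates 'TestLogRolling-testSlowSyncLogRolling' with a single 'info' family (VERSIONS => '1', BLOOMFILTER => 'ROW', no block encoding) and records the new region in hbase:meta. A client-side sketch of an equivalent createTable call is below, assuming an already-open Admin handle; it illustrates the public Admin API rather than the test's actual code.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void create(Admin admin) throws IOException {
    TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    admin.createTable(TableDescriptorBuilder.newBuilder(name)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)                 // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .build())
        .build());
  }
}
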
2024-11-20T23:35:40,291 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T23:35:40,294 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732145740291"}]},"ts":"1732145740291"} 2024-11-20T23:35:40,299 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-20T23:35:40,300 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=016bb07afb95e5389c469e78d274caf0, ASSIGN}] 2024-11-20T23:35:40,303 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=016bb07afb95e5389c469e78d274caf0, ASSIGN 2024-11-20T23:35:40,305 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=016bb07afb95e5389c469e78d274caf0, ASSIGN; state=OFFLINE, location=412a5e44fd2e,33793,1732145737422; forceNewPlan=false, retain=false 2024-11-20T23:35:40,457 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=016bb07afb95e5389c469e78d274caf0, regionState=OPENING, regionLocation=412a5e44fd2e,33793,1732145737422 2024-11-20T23:35:40,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=016bb07afb95e5389c469e78d274caf0, ASSIGN because future has completed 2024-11-20T23:35:40,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 016bb07afb95e5389c469e78d274caf0, server=412a5e44fd2e,33793,1732145737422}] 2024-11-20T23:35:40,628 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 
2024-11-20T23:35:40,628 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 016bb07afb95e5389c469e78d274caf0, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0.', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:35:40,629 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:35:40,629 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:35:40,629 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:35:40,629 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:35:40,632 INFO [StoreOpener-016bb07afb95e5389c469e78d274caf0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:35:40,635 INFO [StoreOpener-016bb07afb95e5389c469e78d274caf0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 016bb07afb95e5389c469e78d274caf0 columnFamilyName info 2024-11-20T23:35:40,635 DEBUG [StoreOpener-016bb07afb95e5389c469e78d274caf0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:35:40,636 INFO [StoreOpener-016bb07afb95e5389c469e78d274caf0-1 {}] regionserver.HStore(327): Store=016bb07afb95e5389c469e78d274caf0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:35:40,637 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:35:40,638 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0 2024-11-20T23:35:40,639 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0 2024-11-20T23:35:40,640 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:35:40,640 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:35:40,643 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:35:40,647 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:35:40,648 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 016bb07afb95e5389c469e78d274caf0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782628, jitterRate=-0.004837051033973694}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T23:35:40,648 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:35:40,649 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 016bb07afb95e5389c469e78d274caf0: Running coprocessor pre-open hook at 1732145740629Writing region info on filesystem at 1732145740629Initializing all the Stores at 1732145740632 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145740632Cleaning up temporary data from old regions at 1732145740640 (+8 ms)Running coprocessor post-open hooks at 1732145740648 (+8 ms)Region opened successfully at 1732145740649 (+1 ms) 2024-11-20T23:35:40,651 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0., pid=6, masterSystemTime=1732145740617 2024-11-20T23:35:40,655 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 2024-11-20T23:35:40,655 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 2024-11-20T23:35:40,656 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=016bb07afb95e5389c469e78d274caf0, regionState=OPEN, openSeqNum=2, regionLocation=412a5e44fd2e,33793,1732145737422 2024-11-20T23:35:40,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 016bb07afb95e5389c469e78d274caf0, server=412a5e44fd2e,33793,1732145737422 because future has completed 2024-11-20T23:35:40,669 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T23:35:40,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 016bb07afb95e5389c469e78d274caf0, server=412a5e44fd2e,33793,1732145737422 in 201 msec 2024-11-20T23:35:40,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T23:35:40,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=016bb07afb95e5389c469e78d274caf0, ASSIGN in 369 msec 2024-11-20T23:35:40,674 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T23:35:40,675 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732145740674"}]},"ts":"1732145740674"} 2024-11-20T23:35:40,679 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-20T23:35:40,681 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T23:35:40,686 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 482 msec 2024-11-20T23:35:45,296 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T23:35:45,368 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T23:35:45,371 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-20T23:35:46,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T23:35:47,000 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T23:35:47,001 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-20T23:35:47,001 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-20T23:35:47,002 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:35:47,002 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T23:35:47,002 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T23:35:47,002 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T23:35:50,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T23:35:50,289 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-20T23:35:50,293 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-20T23:35:50,300 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-20T23:35:50,300 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 
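Once the CREATE operation completes, the test locates the table's single region, and the entries that follow show the WAL on the hosting region server being rolled. A hedged sketch of how a client could request the same thing through the Admin API is below; the variable names and the assumption of exactly one region are illustrative, and this is not necessarily how the test itself triggers the roll.

import java.io.IOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RollWalSketch {
  public static void rollWalForTable(Connection conn, Admin admin, TableName table) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      // The log above shows a single region; take its hosting server and roll that server's WAL.
      ServerName server = locator.getAllRegionLocations().get(0).getServerName();
      admin.rollWALWriter(server);
    }
  }
}
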
2024-11-20T23:35:50,301 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C33793%2C1732145737422.1732145750301 2024-11-20T23:35:50,310 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:35:50,310 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:35:50,311 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:35:50,311 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:35:50,311 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:35:50,311 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145739386 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145750301 2024-11-20T23:35:50,313 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34397:34397),(127.0.0.1/127.0.0.1:35935:35935)] 2024-11-20T23:35:50,313 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145739386 is not closed yet, will try archiving it next time 2024-11-20T23:35:50,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741833_1009 (size=451) 2024-11-20T23:35:50,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741833_1009 (size=451) 2024-11-20T23:35:50,317 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145739386 to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/oldWALs/412a5e44fd2e%2C33793%2C1732145737422.1732145739386 2024-11-20T23:35:50,325 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0., hostname=412a5e44fd2e,33793,1732145737422, seqNum=2] 2024-11-20T23:36:02,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33793 {}] regionserver.HRegion(8855): Flush requested on 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:36:02,370 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 016bb07afb95e5389c469e78d274caf0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T23:36:02,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/72c8deb1f1ff4b118bdcba75cc6d03f8 is 1080, key is row0001/info:/1732145750328/Put/seqid=0 2024-11-20T23:36:02,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741838_1014 (size=12509) 2024-11-20T23:36:02,483 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/72c8deb1f1ff4b118bdcba75cc6d03f8 2024-11-20T23:36:02,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741838_1014 (size=12509) 2024-11-20T23:36:02,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/72c8deb1f1ff4b118bdcba75cc6d03f8 as hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/72c8deb1f1ff4b118bdcba75cc6d03f8 2024-11-20T23:36:02,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/72c8deb1f1ff4b118bdcba75cc6d03f8, entries=7, sequenceid=11, filesize=12.2 K 2024-11-20T23:36:02,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 016bb07afb95e5389c469e78d274caf0 in 237ms, sequenceid=11, compaction requested=false 2024-11-20T23:36:02,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 016bb07afb95e5389c469e78d274caf0: 2024-11-20T23:36:05,724 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
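The 7.36 KB memstore flushed above was filled by client puts against rows row0001..row0007 of family "info" (the HFile writer reports keys of the form row0001/info:/... with cells of roughly 1080 bytes). A minimal client-side sketch of writes in that shape follows; it is an editor's illustration rather than the test's own write loop, and the empty qualifier and ~1 KB value are inferred from the key and cell-size figures in the entries above (the value content itself is arbitrary).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutRowsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
          for (int i = 1; i <= 7; i++) {
            // Row keys row0001..row0007 with an empty qualifier and a ~1 KB value, matching the
            // "rowNNNN/info:/timestamp" keys and ~1080-byte cells reported by HFileWriterImpl above.
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(Bytes.toBytes("info"), new byte[0], new byte[1024]);
            table.put(put);
          }
          // Once the memstore for the region passes its flush size, MemStoreFlusher writes the
          // edits to a .tmp HFile and commits it, as the DefaultStoreFlusher/HStore lines show.
        }
      }
    }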
2024-11-20T23:36:10,380 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C33793%2C1732145737422.1732145770380 2024-11-20T23:36:10,592 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 209 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK], DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK]] 2024-11-20T23:36:10,593 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:10,593 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:10,593 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:10,593 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:10,593 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:10,594 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145750301 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145770380 2024-11-20T23:36:10,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741837_1013 (size=12399) 2024-11-20T23:36:10,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741837_1013 (size=12399) 2024-11-20T23:36:10,599 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34397:34397),(127.0.0.1/127.0.0.1:35935:35935)] 2024-11-20T23:36:10,802 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK], DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK]] 2024-11-20T23:36:13,007 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK], DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK]] 2024-11-20T23:36:15,216 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK], DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK]] 2024-11-20T23:36:17,428 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK], DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK]] 2024-11-20T23:36:17,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33793 {}] 
regionserver.HRegion(8855): Flush requested on 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:36:17,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 016bb07afb95e5389c469e78d274caf0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T23:36:17,631 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK], DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK]] 2024-11-20T23:36:17,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/73b11d1407d1446e86ec67ee93a0c0cb is 1080, key is row0008/info:/1732145764368/Put/seqid=0 2024-11-20T23:36:17,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741840_1016 (size=12509) 2024-11-20T23:36:17,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741840_1016 (size=12509) 2024-11-20T23:36:17,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/73b11d1407d1446e86ec67ee93a0c0cb 2024-11-20T23:36:17,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/73b11d1407d1446e86ec67ee93a0c0cb as hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/73b11d1407d1446e86ec67ee93a0c0cb 2024-11-20T23:36:17,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/73b11d1407d1446e86ec67ee93a0c0cb, entries=7, sequenceid=21, filesize=12.2 K 2024-11-20T23:36:17,879 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK], DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK]] 2024-11-20T23:36:17,880 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 016bb07afb95e5389c469e78d274caf0 in 451ms, sequenceid=21, compaction requested=false 2024-11-20T23:36:17,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 016bb07afb95e5389c469e78d274caf0: 2024-11-20T23:36:17,880 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-20T23:36:17,880 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:36:17,881 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/72c8deb1f1ff4b118bdcba75cc6d03f8 because midkey is the same as first or last row 2024-11-20T23:36:19,633 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK], DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK]] 2024-11-20T23:36:20,792 INFO [master/412a5e44fd2e:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T23:36:20,792 INFO [master/412a5e44fd2e:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T23:36:21,838 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK], DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK]] 2024-11-20T23:36:21,840 WARN [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK], DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK]] 2024-11-20T23:36:21,842 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C33793%2C1732145737422:(num 1732145770380) roll requested 2024-11-20T23:36:21,843 INFO [regionserver/412a5e44fd2e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C33793%2C1732145737422.1732145781843 2024-11-20T23:36:22,054 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK], DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK]] 2024-11-20T23:36:22,054 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:22,054 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:22,055 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:22,055 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:22,055 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:22,055 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145770380 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145781843 2024-11-20T23:36:22,056 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35935:35935),(127.0.0.1/127.0.0.1:34397:34397)] 2024-11-20T23:36:22,056 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145770380 is not closed yet, will try archiving it next time 2024-11-20T23:36:22,056 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145750301 to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/oldWALs/412a5e44fd2e%2C33793%2C1732145737422.1732145750301 2024-11-20T23:36:22,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741839_1015 (size=7739) 2024-11-20T23:36:22,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741839_1015 (size=7739) 2024-11-20T23:36:24,042 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:25,629 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 016bb07afb95e5389c469e78d274caf0, had cached 0 bytes from a total of 25018 2024-11-20T23:36:26,247 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:28,453 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:30,659 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:32,661 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-20T23:36:32,661 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C33793%2C1732145737422.1732145792661 
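The entries above show the count-based roll trigger ("Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5" at 23:36:21), and the entries that follow show the time-based variant (a single sync of 5005 ms against a 5000 ms threshold at 23:36:37). The sketch below paraphrases that two-part decision using the numbers from this log; it is an editor's illustration, not the actual AbstractFSWAL implementation, and the method and parameter names are assumptions.

    final class SlowSyncRollDecision {
      // A roll is requested when either (a) too many moderately slow syncs have accumulated or
      // (b) a single sync exceeds the time threshold. Thresholds are taken from the log entries.
      static boolean shouldRequestRoll(int slowSyncCount, int countThreshold,
                                       long lastSyncCostMs, long timeThresholdMs) {
        if (slowSyncCount > countThreshold) {
          return true;                          // matches "count=8, threshold=5" at 23:36:21
        }
        return lastSyncCostMs >= timeThresholdMs; // matches "time=5005 ms, threshold=5000 ms" later
      }

      public static void main(String[] args) {
        System.out.println(shouldRequestRoll(8, 5, 206, 5000));   // true  (count-based trigger)
        System.out.println(shouldRequestRoll(0, 5, 5005, 5000));  // true  (time-based trigger)
        System.out.println(shouldRequestRoll(3, 5, 201, 5000));   // false (no roll requested)
      }
    }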
2024-11-20T23:36:35,725 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T23:36:37,669 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:37,671 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:37,671 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C33793%2C1732145737422:(num 1732145792661) roll requested 2024-11-20T23:36:37,671 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:37,671 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:37,671 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:37,671 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:37,672 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:37,672 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145781843 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145792661 2024-11-20T23:36:37,673 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35935:35935),(127.0.0.1/127.0.0.1:34397:34397)] 2024-11-20T23:36:37,673 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145781843 is not closed yet, will try archiving it next time 2024-11-20T23:36:37,673 INFO [regionserver/412a5e44fd2e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C33793%2C1732145737422.1732145797673 2024-11-20T23:36:37,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741841_1017 (size=4753) 2024-11-20T23:36:37,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741841_1017 (size=4753) 2024-11-20T23:36:42,677 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:42,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33793 {}] regionserver.HRegion(8855): Flush requested on 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:36:42,677 WARN 
[FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:42,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 016bb07afb95e5389c469e78d274caf0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T23:36:42,683 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:42,683 WARN [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:44,678 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-20T23:36:47,679 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:47,680 WARN [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:47,680 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:47,680 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:47,680 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:47,680 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:47,680 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:47,681 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145792661 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145797673 2024-11-20T23:36:47,682 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35935:35935),(127.0.0.1/127.0.0.1:34397:34397)] 2024-11-20T23:36:47,682 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145792661 is not closed yet, will try archiving it next time 2024-11-20T23:36:47,682 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C33793%2C1732145737422:(num 1732145797673) roll requested 2024-11-20T23:36:47,682 INFO [regionserver/412a5e44fd2e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C33793%2C1732145737422.1732145807682 2024-11-20T23:36:47,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741842_1018 (size=1569) 2024-11-20T23:36:47,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741842_1018 (size=1569) 2024-11-20T23:36:47,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/df98bbc0426449ed9d156aed088ec6ab is 1080, key is row0015/info:/1732145779431/Put/seqid=0 2024-11-20T23:36:47,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741844_1020 (size=12509) 2024-11-20T23:36:47,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741844_1020 (size=12509) 2024-11-20T23:36:47,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/df98bbc0426449ed9d156aed088ec6ab 2024-11-20T23:36:47,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/df98bbc0426449ed9d156aed088ec6ab as hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/df98bbc0426449ed9d156aed088ec6ab 2024-11-20T23:36:47,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/df98bbc0426449ed9d156aed088ec6ab, entries=7, sequenceid=31, filesize=12.2 K 2024-11-20T23:36:52,693 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:52,693 WARN [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], 
DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:52,721 INFO [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:52,721 WARN [FSHLog-0-hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346-prefix:412a5e44fd2e,33793,1732145737422 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39693,DS-4260de69-9d7e-48fa-89dc-8f5425639a25,DISK], DatanodeInfoWithStorage[127.0.0.1:38923,DS-bd1e1c0d-7415-4c10-b681-54b71fd3acd1,DISK]] 2024-11-20T23:36:52,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 016bb07afb95e5389c469e78d274caf0 in 10044ms, sequenceid=31, compaction requested=true 2024-11-20T23:36:52,722 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 016bb07afb95e5389c469e78d274caf0: 2024-11-20T23:36:52,722 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-20T23:36:52,722 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:36:52,723 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/72c8deb1f1ff4b118bdcba75cc6d03f8 because midkey is the same as first or last row 2024-11-20T23:36:52,723 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,723 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,724 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,724 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,725 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145797673 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145807682 2024-11-20T23:36:52,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 016bb07afb95e5389c469e78d274caf0:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T23:36:52,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741843_1019 (size=438) 2024-11-20T23:36:52,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:36:52,728 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741843_1019 (size=438) 2024-11-20T23:36:52,728 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:36:52,729 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145770380 to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/oldWALs/412a5e44fd2e%2C33793%2C1732145737422.1732145770380 2024-11-20T23:36:52,730 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145781843 to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/oldWALs/412a5e44fd2e%2C33793%2C1732145737422.1732145781843 2024-11-20T23:36:52,731 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35935:35935),(127.0.0.1/127.0.0.1:34397:34397)] 2024-11-20T23:36:52,731 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C33793%2C1732145737422:(num 1732145807682) roll requested 2024-11-20T23:36:52,732 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C33793%2C1732145737422.1732145812732 2024-11-20T23:36:52,735 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145792661 to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/oldWALs/412a5e44fd2e%2C33793%2C1732145737422.1732145792661 2024-11-20T23:36:52,735 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T23:36:52,737 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.HStore(1541): 016bb07afb95e5389c469e78d274caf0/info is initiating minor compaction (all files) 2024-11-20T23:36:52,738 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145797673 to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/oldWALs/412a5e44fd2e%2C33793%2C1732145737422.1732145797673 2024-11-20T23:36:52,738 INFO [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 016bb07afb95e5389c469e78d274caf0/info in TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 
2024-11-20T23:36:52,738 INFO [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/72c8deb1f1ff4b118bdcba75cc6d03f8, hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/73b11d1407d1446e86ec67ee93a0c0cb, hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/df98bbc0426449ed9d156aed088ec6ab] into tmpdir=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp, totalSize=36.6 K 2024-11-20T23:36:52,741 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] compactions.Compactor(225): Compacting 72c8deb1f1ff4b118bdcba75cc6d03f8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732145750328 2024-11-20T23:36:52,742 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] compactions.Compactor(225): Compacting 73b11d1407d1446e86ec67ee93a0c0cb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732145764368 2024-11-20T23:36:52,743 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] compactions.Compactor(225): Compacting df98bbc0426449ed9d156aed088ec6ab, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732145779431 2024-11-20T23:36:52,756 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,756 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,757 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,757 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,757 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,757 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145807682 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145812732 2024-11-20T23:36:52,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741845_1021 (size=93) 2024-11-20T23:36:52,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741845_1021 (size=93) 2024-11-20T23:36:52,762 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145807682 to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/oldWALs/412a5e44fd2e%2C33793%2C1732145737422.1732145807682 2024-11-20T23:36:52,762 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35935:35935),(127.0.0.1/127.0.0.1:34397:34397)] 2024-11-20T23:36:52,763 INFO 
[regionserver/412a5e44fd2e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C33793%2C1732145737422.1732145812762 2024-11-20T23:36:52,778 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,778 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,778 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,778 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,778 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:36:52,778 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145812732 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/WALs/412a5e44fd2e,33793,1732145737422/412a5e44fd2e%2C33793%2C1732145737422.1732145812762 2024-11-20T23:36:52,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741846_1022 (size=1258) 2024-11-20T23:36:52,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741846_1022 (size=1258) 2024-11-20T23:36:52,792 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34397:34397),(127.0.0.1/127.0.0.1:35935:35935)] 2024-11-20T23:36:52,802 INFO [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 016bb07afb95e5389c469e78d274caf0#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:36:52,803 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/ed597c5e068547008cfa12c24785865a is 1080, key is row0001/info:/1732145750328/Put/seqid=0 2024-11-20T23:36:52,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741848_1024 (size=27710) 2024-11-20T23:36:52,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741848_1024 (size=27710) 2024-11-20T23:36:52,823 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/ed597c5e068547008cfa12c24785865a as hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/ed597c5e068547008cfa12c24785865a 2024-11-20T23:36:52,843 INFO [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 016bb07afb95e5389c469e78d274caf0/info of 016bb07afb95e5389c469e78d274caf0 into ed597c5e068547008cfa12c24785865a(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
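A quick check of the compaction figures above: the three selected flush files are 12509 bytes each, which is the 37527-byte total that ExploringCompactionPolicy reports ("totalSize=36.6 K"), and the single output file ed597c5e068547008cfa12c24785865a is 27710 bytes ("27.1 K"). The 21 puts (7 entries per input file) are carried over; the output is smaller mainly because per-file overhead such as the HFile trailer, index and bloom blocks is now paid once rather than three times. The snippet below only verifies the arithmetic.

    public class CompactionSizeCheck {
      public static void main(String[] args) {
        long inputBytes = 12509L * 3;             // three 12.2 K flush files from the log
        System.out.println(inputBytes);           // 37527, the size ExploringCompactionPolicy reports
        System.out.println(inputBytes / 1024.0);  // ~36.6, the "totalSize=36.6 K" figure
        System.out.println(27710 / 1024.0);       // ~27.1, the size of the compacted output file
      }
    }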
2024-11-20T23:36:52,843 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 016bb07afb95e5389c469e78d274caf0: 2024-11-20T23:36:52,845 INFO [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0., storeName=016bb07afb95e5389c469e78d274caf0/info, priority=13, startTime=1732145812725; duration=0sec 2024-11-20T23:36:52,846 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-20T23:36:52,846 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:36:52,846 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/ed597c5e068547008cfa12c24785865a because midkey is the same as first or last row 2024-11-20T23:36:52,846 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-20T23:36:52,846 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:36:52,846 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/ed597c5e068547008cfa12c24785865a because midkey is the same as first or last row 2024-11-20T23:36:52,846 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-20T23:36:52,846 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:36:52,846 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/ed597c5e068547008cfa12c24785865a because midkey is the same as first or last row 2024-11-20T23:36:52,846 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:36:52,847 DEBUG [RS:0;412a5e44fd2e:33793-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 016bb07afb95e5389c469e78d274caf0:info 2024-11-20T23:37:04,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33793 {}] regionserver.HRegion(8855): Flush requested on 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:37:04,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 016bb07afb95e5389c469e78d274caf0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T23:37:04,817 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/14f456659d6146d3841fecd4b10ff89a is 1080, key is row0022/info:/1732145812764/Put/seqid=0 2024-11-20T23:37:04,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741849_1025 (size=12509) 2024-11-20T23:37:04,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741849_1025 (size=12509) 2024-11-20T23:37:04,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/14f456659d6146d3841fecd4b10ff89a 2024-11-20T23:37:04,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/14f456659d6146d3841fecd4b10ff89a as hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/14f456659d6146d3841fecd4b10ff89a 2024-11-20T23:37:04,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/14f456659d6146d3841fecd4b10ff89a, entries=7, sequenceid=42, filesize=12.2 K 2024-11-20T23:37:04,853 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 016bb07afb95e5389c469e78d274caf0 in 47ms, sequenceid=42, compaction requested=false 2024-11-20T23:37:04,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 016bb07afb95e5389c469e78d274caf0: 2024-11-20T23:37:04,853 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-20T23:37:04,854 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:37:04,854 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/ed597c5e068547008cfa12c24785865a because midkey is the same as first or last row 2024-11-20T23:37:05,725 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
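The split-policy entries above record a two-stage decision: the size gate passes ("Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K"), but the split is still abandoned because the candidate split point, the midkey of the largest store file, equals that file's first or last row, so no usable split row exists. The sketch below paraphrases that decision; it is illustrative only, not the actual ConstantSizeRegionSplitPolicy/StoreUtils code, which operates on store files rather than plain byte arrays.

    import java.util.Arrays;

    final class SplitDecisionSketch {
      // Size gate: "Should split" when the summed store size exceeds the size to check.
      static boolean sizeBigEnough(double sumSizeKb, double sizeToCheckKb) {
        return sumSizeKb > sizeToCheckKb;         // e.g. 39.3 K > 16.0 K in the entries above
      }

      // Split-point gate: "cannot split ... because midkey is the same as first or last row";
      // splitting there would leave one daughter region effectively empty.
      static boolean usableSplitPoint(byte[] midkey, byte[] firstRow, byte[] lastRow) {
        return !Arrays.equals(midkey, firstRow) && !Arrays.equals(midkey, lastRow);
      }

      public static void main(String[] args) {
        System.out.println(sizeBigEnough(39.3, 16.0));                     // true
        byte[] first = {1}, last = {9};
        System.out.println(usableSplitPoint(first, first, last));          // false, as in the log
        System.out.println(usableSplitPoint(new byte[] {5}, first, last)); // true
      }
    }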
2024-11-20T23:37:10,629 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 016bb07afb95e5389c469e78d274caf0, had cached 0 bytes from a total of 40219 2024-11-20T23:37:12,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T23:37:12,826 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T23:37:12,826 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 
2024-11-20T23:37:12,834 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:37:12,835 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:37:12,835 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T23:37:12,835 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T23:37:12,835 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1099508225, stopped=false 2024-11-20T23:37:12,836 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=412a5e44fd2e,44935,1732145736648 2024-11-20T23:37:12,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:37:12,848 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:37:12,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:12,848 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:12,848 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:37:12,848 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
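The stack trace above runs through AbstractTestLogRolling.tearDown into HBaseTestingUtil.shutdownMiniCluster, which is the standard mini-cluster teardown path. The sketch below shows that lifecycle in the shape of a typical JUnit 4 test; it assumes HBaseTestingUtil exposes startMiniCluster()/shutdownMiniCluster() and a no-argument constructor as its predecessor HBaseTestingUtility does, and the class and method names here are illustrative, not the actual test source.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        util.startMiniCluster();   // brings up HDFS, ZooKeeper, a master and a region server
      }

      @After
      public void tearDown() throws Exception {
        // Closes the shared async connection and stops the master and region servers, producing
        // the "Shutting down minicluster" and "STOPPING region server" entries seen in this log.
        util.shutdownMiniCluster();
      }
    }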
2024-11-20T23:37:12,849 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:37:12,849 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:37:12,849 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:37:12,849 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:37:12,849 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '412a5e44fd2e,33793,1732145737422' ***** 2024-11-20T23:37:12,850 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T23:37:12,850 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T23:37:12,851 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T23:37:12,851 INFO [RS:0;412a5e44fd2e:33793 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T23:37:12,851 INFO [RS:0;412a5e44fd2e:33793 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T23:37:12,851 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(3091): Received CLOSE for 016bb07afb95e5389c469e78d274caf0 2024-11-20T23:37:12,852 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(959): stopping server 412a5e44fd2e,33793,1732145737422 2024-11-20T23:37:12,852 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:37:12,852 INFO [RS:0;412a5e44fd2e:33793 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;412a5e44fd2e:33793. 2024-11-20T23:37:12,852 DEBUG [RS:0;412a5e44fd2e:33793 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:37:12,852 DEBUG [RS:0;412a5e44fd2e:33793 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:37:12,852 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 016bb07afb95e5389c469e78d274caf0, disabling compactions & flushes 2024-11-20T23:37:12,852 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 2024-11-20T23:37:12,853 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T23:37:12,853 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 2024-11-20T23:37:12,853 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T23:37:12,853 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T23:37:12,853 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. after waiting 0 ms 2024-11-20T23:37:12,853 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 2024-11-20T23:37:12,853 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T23:37:12,853 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 016bb07afb95e5389c469e78d274caf0 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-20T23:37:12,854 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-20T23:37:12,854 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T23:37:12,854 DEBUG [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 016bb07afb95e5389c469e78d274caf0=TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0.} 2024-11-20T23:37:12,854 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:37:12,854 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:37:12,854 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T23:37:12,854 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:37:12,854 DEBUG [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(1351): Waiting on 016bb07afb95e5389c469e78d274caf0, 1588230740 2024-11-20T23:37:12,854 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-20T23:37:12,861 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/952691b6bf9a487a9f6b37add106aaf9 is 1080, key is row0029/info:/1732145826810/Put/seqid=0 2024-11-20T23:37:12,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741850_1026 (size=8193) 2024-11-20T23:37:12,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741850_1026 (size=8193) 2024-11-20T23:37:12,873 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/952691b6bf9a487a9f6b37add106aaf9 2024-11-20T23:37:12,880 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/.tmp/info/266aa0afb5b246a2bdcdac6f556dfb7c is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0./info:regioninfo/1732145740656/Put/seqid=0 2024-11-20T23:37:12,885 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/.tmp/info/952691b6bf9a487a9f6b37add106aaf9 as hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/952691b6bf9a487a9f6b37add106aaf9 2024-11-20T23:37:12,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741851_1027 (size=7016) 2024-11-20T23:37:12,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741851_1027 (size=7016) 2024-11-20T23:37:12,891 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/.tmp/info/266aa0afb5b246a2bdcdac6f556dfb7c 2024-11-20T23:37:12,899 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/952691b6bf9a487a9f6b37add106aaf9, entries=3, sequenceid=48, filesize=8.0 K 2024-11-20T23:37:12,901 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 016bb07afb95e5389c469e78d274caf0 in 48ms, sequenceid=48, compaction requested=true 2024-11-20T23:37:12,902 DEBUG 
[StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/72c8deb1f1ff4b118bdcba75cc6d03f8, hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/73b11d1407d1446e86ec67ee93a0c0cb, hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/df98bbc0426449ed9d156aed088ec6ab] to archive 2024-11-20T23:37:12,906 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T23:37:12,911 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/72c8deb1f1ff4b118bdcba75cc6d03f8 to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/archive/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/72c8deb1f1ff4b118bdcba75cc6d03f8 2024-11-20T23:37:12,914 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/73b11d1407d1446e86ec67ee93a0c0cb to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/archive/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/73b11d1407d1446e86ec67ee93a0c0cb 2024-11-20T23:37:12,917 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/df98bbc0426449ed9d156aed088ec6ab to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/archive/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/info/df98bbc0426449ed9d156aed088ec6ab 2024-11-20T23:37:12,925 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/.tmp/ns/2589785f1bfc446bb6865b2351c3a7c5 is 43, key is default/ns:d/1732145739946/Put/seqid=0 2024-11-20T23:37:12,931 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=412a5e44fd2e:44935 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-20T23:37:12,936 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [72c8deb1f1ff4b118bdcba75cc6d03f8=12509, 73b11d1407d1446e86ec67ee93a0c0cb=12509, df98bbc0426449ed9d156aed088ec6ab=12509] 2024-11-20T23:37:12,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741852_1028 (size=5153) 2024-11-20T23:37:12,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741852_1028 (size=5153) 2024-11-20T23:37:12,943 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/default/TestLogRolling-testSlowSyncLogRolling/016bb07afb95e5389c469e78d274caf0/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-20T23:37:12,945 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 2024-11-20T23:37:12,945 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 016bb07afb95e5389c469e78d274caf0: Waiting for close lock at 1732145832852Running coprocessor pre-close hooks at 1732145832852Disabling compacts and flushes for region at 1732145832852Disabling writes for close at 1732145832853 (+1 ms)Obtaining lock to block concurrent updates at 1732145832853Preparing flush snapshotting stores in 016bb07afb95e5389c469e78d274caf0 at 1732145832853Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732145832854 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. at 1732145832855 (+1 ms)Flushing 016bb07afb95e5389c469e78d274caf0/info: creating writer at 1732145832855Flushing 016bb07afb95e5389c469e78d274caf0/info: appending metadata at 1732145832860 (+5 ms)Flushing 016bb07afb95e5389c469e78d274caf0/info: closing flushed file at 1732145832860Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1356b840: reopening flushed file at 1732145832883 (+23 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 016bb07afb95e5389c469e78d274caf0 in 48ms, sequenceid=48, compaction requested=true at 1732145832901 (+18 ms)Writing region close event to WAL at 1732145832937 (+36 ms)Running coprocessor post-close hooks at 1732145832944 (+7 ms)Closed at 1732145832945 (+1 ms) 2024-11-20T23:37:12,946 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732145740192.016bb07afb95e5389c469e78d274caf0. 
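The close journal above ends the region shutdown that the teardown stack trace near the top of this excerpt set in motion (AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster -> HMaster.shutdown). A minimal sketch of that teardown pattern, assuming a static HBaseTestingUtil field named TEST_UTIL and a sketch class name; only the tearDown() -> shutdownMiniCluster() call itself is taken from the stack trace logged above.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public abstract class LogRollingTeardownSketch {
      // Assumed field name; the real AbstractTestLogRolling wiring is not shown in this log.
      protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Mirrors the logged call stack: tearDown() -> shutdownMiniCluster(), which stops the
        // region server, master, DataNodes and MiniZK cluster whose shutdown is recorded below.
        TEST_UTIL.shutdownMiniCluster();
      }
    }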
2024-11-20T23:37:13,054 DEBUG [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-20T23:37:13,227 INFO [regionserver/412a5e44fd2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T23:37:13,228 INFO [regionserver/412a5e44fd2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T23:37:13,233 INFO [regionserver/412a5e44fd2e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:37:13,255 DEBUG [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-20T23:37:13,339 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/.tmp/ns/2589785f1bfc446bb6865b2351c3a7c5 2024-11-20T23:37:13,367 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/.tmp/table/1c741f967ede4ee5b3a659ec42ea3500 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732145740674/Put/seqid=0 2024-11-20T23:37:13,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741853_1029 (size=5396) 2024-11-20T23:37:13,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741853_1029 (size=5396) 2024-11-20T23:37:13,373 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/.tmp/table/1c741f967ede4ee5b3a659ec42ea3500 2024-11-20T23:37:13,380 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/.tmp/info/266aa0afb5b246a2bdcdac6f556dfb7c as hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/info/266aa0afb5b246a2bdcdac6f556dfb7c 2024-11-20T23:37:13,388 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/info/266aa0afb5b246a2bdcdac6f556dfb7c, entries=10, sequenceid=11, filesize=6.9 K 2024-11-20T23:37:13,390 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/.tmp/ns/2589785f1bfc446bb6865b2351c3a7c5 as hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/ns/2589785f1bfc446bb6865b2351c3a7c5 2024-11-20T23:37:13,398 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/ns/2589785f1bfc446bb6865b2351c3a7c5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-20T23:37:13,399 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/.tmp/table/1c741f967ede4ee5b3a659ec42ea3500 as hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/table/1c741f967ede4ee5b3a659ec42ea3500 2024-11-20T23:37:13,408 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/table/1c741f967ede4ee5b3a659ec42ea3500, entries=2, sequenceid=11, filesize=5.3 K 2024-11-20T23:37:13,410 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 556ms, sequenceid=11, compaction requested=false 2024-11-20T23:37:13,416 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-20T23:37:13,416 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:37:13,417 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T23:37:13,417 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732145832854Running coprocessor pre-close hooks at 1732145832854Disabling compacts and flushes for region at 1732145832854Disabling writes for close at 1732145832854Obtaining lock to block concurrent updates at 1732145832854Preparing flush snapshotting stores in 1588230740 at 1732145832854Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732145832855 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732145832856 (+1 ms)Flushing 1588230740/info: creating writer at 1732145832856Flushing 1588230740/info: appending metadata at 1732145832879 (+23 ms)Flushing 1588230740/info: closing flushed file at 1732145832879Flushing 1588230740/ns: creating writer at 1732145832905 (+26 ms)Flushing 1588230740/ns: appending metadata at 1732145832925 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1732145832925Flushing 1588230740/table: creating writer at 1732145833350 (+425 ms)Flushing 1588230740/table: appending metadata at 1732145833366 (+16 ms)Flushing 1588230740/table: closing flushed file at 1732145833366Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1866c621: reopening flushed file at 1732145833379 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35d21e12: reopening flushed file at 1732145833388 (+9 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ed849c0: reopening flushed file at 1732145833398 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 556ms, sequenceid=11, compaction requested=false at 1732145833410 (+12 ms)Writing region close event to WAL at 1732145833411 (+1 ms)Running coprocessor post-close hooks at 1732145833416 (+5 ms)Closed at 1732145833417 (+1 ms) 2024-11-20T23:37:13,417 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T23:37:13,456 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(976): stopping server 412a5e44fd2e,33793,1732145737422; all regions closed. 2024-11-20T23:37:13,458 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,459 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,459 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,459 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,460 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741834_1010 (size=3066) 2024-11-20T23:37:13,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741834_1010 (size=3066) 2024-11-20T23:37:13,469 DEBUG [RS:0;412a5e44fd2e:33793 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/oldWALs 2024-11-20T23:37:13,469 INFO [RS:0;412a5e44fd2e:33793 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C33793%2C1732145737422.meta:.meta(num 1732145739760) 2024-11-20T23:37:13,470 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,470 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,470 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,470 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,470 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741847_1023 (size=12695) 2024-11-20T23:37:13,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741847_1023 (size=12695) 2024-11-20T23:37:13,476 DEBUG [RS:0;412a5e44fd2e:33793 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/oldWALs 2024-11-20T23:37:13,477 INFO [RS:0;412a5e44fd2e:33793 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C33793%2C1732145737422:(num 1732145812762) 2024-11-20T23:37:13,477 DEBUG [RS:0;412a5e44fd2e:33793 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:37:13,477 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:37:13,477 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:37:13,477 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.ChoreService(370): Chore service for: regionserver/412a5e44fd2e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T23:37:13,477 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:37:13,477 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T23:37:13,478 INFO [RS:0;412a5e44fd2e:33793 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33793 2024-11-20T23:37:13,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:37:13,535 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/412a5e44fd2e,33793,1732145737422 2024-11-20T23:37:13,535 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:37:13,536 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [412a5e44fd2e,33793,1732145737422] 2024-11-20T23:37:13,556 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/412a5e44fd2e,33793,1732145737422 already deleted, retry=false 2024-11-20T23:37:13,556 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 412a5e44fd2e,33793,1732145737422 expired; onlineServers=0 2024-11-20T23:37:13,557 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '412a5e44fd2e,44935,1732145736648' ***** 2024-11-20T23:37:13,557 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T23:37:13,557 INFO [M:0;412a5e44fd2e:44935 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:37:13,557 INFO [M:0;412a5e44fd2e:44935 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:37:13,557 DEBUG [M:0;412a5e44fd2e:44935 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T23:37:13,557 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T23:37:13,557 DEBUG [M:0;412a5e44fd2e:44935 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T23:37:13,557 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145739030 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145739030,5,FailOnTimeoutGroup] 2024-11-20T23:37:13,557 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145739031 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145739031,5,FailOnTimeoutGroup] 2024-11-20T23:37:13,558 INFO [M:0;412a5e44fd2e:44935 {}] hbase.ChoreService(370): Chore service for: master/412a5e44fd2e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T23:37:13,558 INFO [M:0;412a5e44fd2e:44935 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:37:13,558 DEBUG [M:0;412a5e44fd2e:44935 {}] master.HMaster(1795): Stopping service threads 2024-11-20T23:37:13,558 INFO [M:0;412a5e44fd2e:44935 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T23:37:13,558 INFO [M:0;412a5e44fd2e:44935 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:37:13,559 INFO [M:0;412a5e44fd2e:44935 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T23:37:13,559 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T23:37:13,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T23:37:13,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:13,567 DEBUG [M:0;412a5e44fd2e:44935 {}] zookeeper.ZKUtil(347): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T23:37:13,567 WARN [M:0;412a5e44fd2e:44935 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T23:37:13,568 INFO [M:0;412a5e44fd2e:44935 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/.lastflushedseqids 2024-11-20T23:37:13,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741854_1030 (size=130) 2024-11-20T23:37:13,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741854_1030 (size=130) 2024-11-20T23:37:13,589 INFO [M:0;412a5e44fd2e:44935 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T23:37:13,589 INFO [M:0;412a5e44fd2e:44935 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T23:37:13,589 DEBUG [M:0;412a5e44fd2e:44935 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:37:13,589 INFO [M:0;412a5e44fd2e:44935 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:37:13,589 DEBUG [M:0;412a5e44fd2e:44935 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:37:13,589 DEBUG [M:0;412a5e44fd2e:44935 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T23:37:13,589 DEBUG [M:0;412a5e44fd2e:44935 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:37:13,589 INFO [M:0;412a5e44fd2e:44935 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-20T23:37:13,607 DEBUG [M:0;412a5e44fd2e:44935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9e26ab37118b400eb5184ba54b03f106 is 82, key is hbase:meta,,1/info:regioninfo/1732145739835/Put/seqid=0 2024-11-20T23:37:13,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741855_1031 (size=5672) 2024-11-20T23:37:13,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741855_1031 (size=5672) 2024-11-20T23:37:13,613 INFO [M:0;412a5e44fd2e:44935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9e26ab37118b400eb5184ba54b03f106 2024-11-20T23:37:13,639 DEBUG [M:0;412a5e44fd2e:44935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b2a261c666234d76ad25e3f447372466 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732145740684/Put/seqid=0 2024-11-20T23:37:13,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741856_1032 (size=6247) 2024-11-20T23:37:13,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741856_1032 (size=6247) 2024-11-20T23:37:13,646 INFO [M:0;412a5e44fd2e:44935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b2a261c666234d76ad25e3f447372466 2024-11-20T23:37:13,646 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:37:13,646 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33793-0x1015a9a642d0001, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
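The ZKWatcher lines in this stretch (NodeChildrenChanged and NodeDeleted on /hbase/rs, then type=None, state=Closed as the sessions close) follow ZooKeeper's standard watch-notification pattern, the same one behind the earlier "Set watcher on znode that does not yet exist, /hbase/running" messages. A small, hypothetical sketch with the plain ZooKeeper client is shown below; the quorum address is copied from the log, while the session timeout and the printing are illustrative, and nothing here is taken from HBase's ZKUtil internals.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        // Session against the mini-cluster quorum seen in the log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55252", 30_000,
            (WatchedEvent event) -> System.out.println("event: " + event));
        // exists() registers a watch even when the znode is absent, which is what the
        // "Set watcher on znode that does not yet exist" messages above report for /hbase/running.
        Stat stat = zk.exists("/hbase/running", true);
        System.out.println("/hbase/running present: " + (stat != null));
        // Closing the session is what produces the type=None, state=Closed events logged here.
        zk.close();
      }
    }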
2024-11-20T23:37:13,646 INFO [RS:0;412a5e44fd2e:33793 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:37:13,647 INFO [RS:0;412a5e44fd2e:33793 {}] regionserver.HRegionServer(1031): Exiting; stopping=412a5e44fd2e,33793,1732145737422; zookeeper connection closed. 2024-11-20T23:37:13,647 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5286f2dc {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5286f2dc 2024-11-20T23:37:13,647 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T23:37:13,653 INFO [M:0;412a5e44fd2e:44935 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b2a261c666234d76ad25e3f447372466 2024-11-20T23:37:13,670 DEBUG [M:0;412a5e44fd2e:44935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d12b816423714682bcf2b83a0a66d36e is 69, key is 412a5e44fd2e,33793,1732145737422/rs:state/1732145739138/Put/seqid=0 2024-11-20T23:37:13,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741857_1033 (size=5156) 2024-11-20T23:37:13,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741857_1033 (size=5156) 2024-11-20T23:37:13,676 INFO [M:0;412a5e44fd2e:44935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d12b816423714682bcf2b83a0a66d36e 2024-11-20T23:37:13,701 DEBUG [M:0;412a5e44fd2e:44935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0b2d06b8bdc9499b9b0b15fae6e86763 is 52, key is load_balancer_on/state:d/1732145740173/Put/seqid=0 2024-11-20T23:37:13,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741858_1034 (size=5056) 2024-11-20T23:37:13,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741858_1034 (size=5056) 2024-11-20T23:37:13,708 INFO [M:0;412a5e44fd2e:44935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0b2d06b8bdc9499b9b0b15fae6e86763 2024-11-20T23:37:13,718 DEBUG [M:0;412a5e44fd2e:44935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9e26ab37118b400eb5184ba54b03f106 as hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9e26ab37118b400eb5184ba54b03f106 2024-11-20T23:37:13,726 INFO [M:0;412a5e44fd2e:44935 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9e26ab37118b400eb5184ba54b03f106, entries=8, sequenceid=59, filesize=5.5 K 2024-11-20T23:37:13,728 DEBUG [M:0;412a5e44fd2e:44935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b2a261c666234d76ad25e3f447372466 as hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b2a261c666234d76ad25e3f447372466 2024-11-20T23:37:13,752 INFO [M:0;412a5e44fd2e:44935 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b2a261c666234d76ad25e3f447372466 2024-11-20T23:37:13,752 INFO [M:0;412a5e44fd2e:44935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b2a261c666234d76ad25e3f447372466, entries=6, sequenceid=59, filesize=6.1 K 2024-11-20T23:37:13,754 DEBUG [M:0;412a5e44fd2e:44935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d12b816423714682bcf2b83a0a66d36e as hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d12b816423714682bcf2b83a0a66d36e 2024-11-20T23:37:13,763 INFO [M:0;412a5e44fd2e:44935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d12b816423714682bcf2b83a0a66d36e, entries=1, sequenceid=59, filesize=5.0 K 2024-11-20T23:37:13,764 DEBUG [M:0;412a5e44fd2e:44935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0b2d06b8bdc9499b9b0b15fae6e86763 as hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0b2d06b8bdc9499b9b0b15fae6e86763 2024-11-20T23:37:13,772 INFO [M:0;412a5e44fd2e:44935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0b2d06b8bdc9499b9b0b15fae6e86763, entries=1, sequenceid=59, filesize=4.9 K 2024-11-20T23:37:13,773 INFO [M:0;412a5e44fd2e:44935 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 184ms, sequenceid=59, compaction requested=false 2024-11-20T23:37:13,775 INFO [M:0;412a5e44fd2e:44935 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T23:37:13,775 DEBUG [M:0;412a5e44fd2e:44935 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732145833589Disabling compacts and flushes for region at 1732145833589Disabling writes for close at 1732145833589Obtaining lock to block concurrent updates at 1732145833590 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732145833590Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732145833590Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732145833591 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732145833591Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732145833606 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732145833606Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732145833620 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732145833638 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732145833638Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732145833653 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732145833669 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732145833669Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732145833683 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732145833700 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732145833700Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@53e648ab: reopening flushed file at 1732145833717 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f846d7a: reopening flushed file at 1732145833726 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@205da65c: reopening flushed file at 1732145833753 (+27 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29453017: reopening flushed file at 1732145833763 (+10 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 184ms, sequenceid=59, compaction requested=false at 1732145833773 (+10 ms)Writing region close event to WAL at 1732145833775 (+2 ms)Closed at 1732145833775 2024-11-20T23:37:13,776 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,776 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,776 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,776 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,776 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:13,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39693 is added to blk_1073741830_1006 (size=27973) 2024-11-20T23:37:13,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741830_1006 (size=27973) 2024-11-20T23:37:13,779 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T23:37:13,780 INFO [M:0;412a5e44fd2e:44935 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T23:37:13,780 INFO [M:0;412a5e44fd2e:44935 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44935 2024-11-20T23:37:13,780 INFO [M:0;412a5e44fd2e:44935 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:37:13,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:37:13,888 INFO [M:0;412a5e44fd2e:44935 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:37:13,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44935-0x1015a9a642d0000, quorum=127.0.0.1:55252, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:37:13,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:13,895 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:37:13,895 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:37:13,895 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:37:13,895 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/hadoop.log.dir/,STOPPED} 2024-11-20T23:37:13,898 WARN [BP-1444344256-172.17.0.2-1732145732167 heartbeating to localhost/127.0.0.1:42045 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:37:13,898 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T23:37:13,898 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:37:13,898 WARN [BP-1444344256-172.17.0.2-1732145732167 heartbeating to localhost/127.0.0.1:42045 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1444344256-172.17.0.2-1732145732167 (Datanode Uuid a5b8cb3b-20d4-4a60-b10a-5f9b38cfd3d8) service to localhost/127.0.0.1:42045 2024-11-20T23:37:13,899 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/cluster_6b5435d9-f9b6-0860-04a5-363a5d7b51f7/data/data3/current/BP-1444344256-172.17.0.2-1732145732167 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:13,899 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/cluster_6b5435d9-f9b6-0860-04a5-363a5d7b51f7/data/data4/current/BP-1444344256-172.17.0.2-1732145732167 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:13,900 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:37:13,902 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:13,903 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:37:13,903 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:37:13,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:37:13,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/hadoop.log.dir/,STOPPED} 2024-11-20T23:37:13,905 WARN [BP-1444344256-172.17.0.2-1732145732167 heartbeating to localhost/127.0.0.1:42045 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:37:13,905 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T23:37:13,905 WARN [BP-1444344256-172.17.0.2-1732145732167 heartbeating to localhost/127.0.0.1:42045 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1444344256-172.17.0.2-1732145732167 (Datanode Uuid 34685de6-fcbe-4837-98b1-5cfe9cf69bba) service to localhost/127.0.0.1:42045 2024-11-20T23:37:13,905 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:37:13,905 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/cluster_6b5435d9-f9b6-0860-04a5-363a5d7b51f7/data/data1/current/BP-1444344256-172.17.0.2-1732145732167 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:13,906 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/cluster_6b5435d9-f9b6-0860-04a5-363a5d7b51f7/data/data2/current/BP-1444344256-172.17.0.2-1732145732167 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:13,906 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:37:13,917 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T23:37:13,917 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:37:13,917 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:37:13,917 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:37:13,918 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/hadoop.log.dir/,STOPPED} 2024-11-20T23:37:13,926 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T23:37:13,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T23:37:13,969 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42045 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42045 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) 
app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/412a5e44fd2e:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42045 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42045 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@e1ae461 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: master/412a5e44fd2e:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42045 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:42045 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42045 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42045 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/412a5e44fd2e:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? 
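The frames in the thread dump above are dominated by threads that are simply parked waiting for work: selector waits (EPoll.wait, epollWait), timer sleeps, and blocking-queue takes. The sync.N entries in particular sit in LinkedBlockingQueue.take() called from FSHLog$SyncRunner.takeSyncRequest, the standard blocking-consumer pattern. A minimal, self-contained sketch of that pattern, using only the JDK and hypothetical names (not HBase's actual FSHLog code):

import java.util.concurrent.LinkedBlockingQueue;

// Minimal blocking-consumer sketch (hypothetical class, not HBase's FSHLog$SyncRunner).
// A thread blocked in requests.take() shows exactly the Unsafe.park /
// AbstractQueuedSynchronizer / LinkedBlockingQueue.take frames seen in the sync.N
// entries above: it is parked waiting for the next request, not busy or deadlocked.
public class SyncRunnerSketch {
    private final LinkedBlockingQueue<Runnable> requests = new LinkedBlockingQueue<>();
    private final Thread worker = new Thread(() -> {
        try {
            while (true) {
                Runnable request = requests.take(); // parks here until work arrives
                request.run();
            }
        } catch (InterruptedException e) {
            // interrupted during shutdown: restore the flag and exit quietly
            Thread.currentThread().interrupt();
        }
    }, "sync.sketch");

    public void start()            { worker.start(); }
    public void submit(Runnable r) { requests.add(r); }
    public void shutdown()         { worker.interrupt(); } // wakes the blocked take()
}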
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=280 (was 347), ProcessCount=11 (was 11), AvailableMemoryMB=1150 (was 1561) 2024-11-20T23:37:13,975 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=280, ProcessCount=11, AvailableMemoryMB=1149 2024-11-20T23:37:13,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T23:37:13,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/hadoop.log.dir so I do NOT create it in target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6 2024-11-20T23:37:13,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6de3d7d2-0656-6000-ddbd-183ac41ac406/hadoop.tmp.dir so I do NOT create it in target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6 2024-11-20T23:37:13,976 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/cluster_5f4fe31e-fafa-e3ea-229b-11af3830b9ec, deleteOnExit=true 2024-11-20T23:37:13,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T23:37:13,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/test.cache.data in system properties and HBase conf 2024-11-20T23:37:13,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T23:37:13,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/hadoop.log.dir in system properties and HBase conf 2024-11-20T23:37:13,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T23:37:13,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T23:37:13,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T23:37:13,977 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-20T23:37:13,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:37:13,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:37:13,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T23:37:13,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:37:13,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T23:37:13,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T23:37:13,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:37:13,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:37:13,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T23:37:13,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/nfs.dump.dir in system properties and HBase conf 2024-11-20T23:37:13,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/java.io.tmpdir in system properties and HBase conf 2024-11-20T23:37:13,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:37:13,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T23:37:13,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T23:37:13,992 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:37:14,355 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:37:14,362 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:37:14,363 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:37:14,364 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:37:14,364 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T23:37:14,364 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
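The block above records the full environment being rebuilt for regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath: per-test paths are pushed into system properties and the HBase conf, and a fresh mini cluster is brought up with the options printed in the StartMiniClusterOption line (1 master, 1 region server, 2 datanodes, 1 ZK server). A rough sketch of how a test might drive such a startup; the builder method names below merely mirror the fields in that option string and are assumptions, not a confirmed HBaseTestingUtil/StartMiniClusterOption API:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Hypothetical sketch of the kind of test bootstrap being logged above; method names
// are guesses modeled on the logged option string, so check the real signatures.
public class MiniClusterSketch {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // numMasters=1 in the logged option string
            .numRegionServers(1)  // numRegionServers=1
            .numDataNodes(2)      // numDataNodes=2
            .numZkServers(1)      // numZkServers=1
            .build();
        util.startMiniCluster(option);   // produces the "Starting up minicluster with option: ..." line
        try {
            // ... exercise the cluster, e.g. roll WALs while stopping a datanode ...
        } finally {
            util.shutdownMiniCluster();  // produces the "Minicluster is down" line
        }
    }
}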
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:37:14,365 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@641eaf99{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:37:14,365 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a15ed6a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:37:14,462 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ce0de36{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/java.io.tmpdir/jetty-localhost-40511-hadoop-hdfs-3_4_1-tests_jar-_-any-3482255323464170415/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T23:37:14,463 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3614f662{HTTP/1.1, (http/1.1)}{localhost:40511} 2024-11-20T23:37:14,463 INFO [Time-limited test {}] server.Server(415): Started @104663ms 2024-11-20T23:37:14,475 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:37:14,898 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:37:14,902 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:37:14,903 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:37:14,903 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:37:14,903 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:37:14,904 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa9c156{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:37:14,904 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e7873b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:37:15,004 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2cd60cfb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/java.io.tmpdir/jetty-localhost-46493-hadoop-hdfs-3_4_1-tests_jar-_-any-1059210829070956855/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:15,005 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f88e14b{HTTP/1.1, (http/1.1)}{localhost:46493} 2024-11-20T23:37:15,005 INFO [Time-limited test {}] server.Server(415): Started @105205ms 2024-11-20T23:37:15,006 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:37:15,054 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:37:15,059 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:37:15,060 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:37:15,061 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:37:15,061 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:37:15,062 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d5e070a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:37:15,062 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@573af0f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:37:15,172 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b5e52bc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/java.io.tmpdir/jetty-localhost-45931-hadoop-hdfs-3_4_1-tests_jar-_-any-14454889341343217355/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:15,172 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@882842c{HTTP/1.1, (http/1.1)}{localhost:45931} 2024-11-20T23:37:15,172 INFO [Time-limited test {}] server.Server(415): Started @105372ms 2024-11-20T23:37:15,174 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:37:16,239 WARN [Thread-454 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/cluster_5f4fe31e-fafa-e3ea-229b-11af3830b9ec/data/data2/current/BP-1483738985-172.17.0.2-1732145834006/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:16,239 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/cluster_5f4fe31e-fafa-e3ea-229b-11af3830b9ec/data/data1/current/BP-1483738985-172.17.0.2-1732145834006/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:16,255 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T23:37:16,258 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeef46fbbab6dc80d with lease ID 0xfaae56d200eb46a1: Processing first storage report for DS-017ab3fd-c73e-4a53-a4f7-4e6983940756 from datanode DatanodeRegistration(127.0.0.1:38693, datanodeUuid=ad735f0b-d537-4435-9567-d0c8ca680beb, infoPort=40643, infoSecurePort=0, ipcPort=45925, storageInfo=lv=-57;cid=testClusterID;nsid=353524311;c=1732145834006) 2024-11-20T23:37:16,258 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeef46fbbab6dc80d with lease ID 0xfaae56d200eb46a1: from storage DS-017ab3fd-c73e-4a53-a4f7-4e6983940756 node DatanodeRegistration(127.0.0.1:38693, datanodeUuid=ad735f0b-d537-4435-9567-d0c8ca680beb, infoPort=40643, infoSecurePort=0, ipcPort=45925, storageInfo=lv=-57;cid=testClusterID;nsid=353524311;c=1732145834006), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:37:16,258 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeef46fbbab6dc80d with lease ID 0xfaae56d200eb46a1: Processing first storage report for DS-72fa5341-4273-4d21-a627-2830187fbd1d from datanode DatanodeRegistration(127.0.0.1:38693, datanodeUuid=ad735f0b-d537-4435-9567-d0c8ca680beb, infoPort=40643, infoSecurePort=0, ipcPort=45925, storageInfo=lv=-57;cid=testClusterID;nsid=353524311;c=1732145834006) 2024-11-20T23:37:16,258 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeef46fbbab6dc80d with lease ID 0xfaae56d200eb46a1: from storage DS-72fa5341-4273-4d21-a627-2830187fbd1d node DatanodeRegistration(127.0.0.1:38693, datanodeUuid=ad735f0b-d537-4435-9567-d0c8ca680beb, infoPort=40643, infoSecurePort=0, ipcPort=45925, storageInfo=lv=-57;cid=testClusterID;nsid=353524311;c=1732145834006), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T23:37:16,353 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/cluster_5f4fe31e-fafa-e3ea-229b-11af3830b9ec/data/data4/current/BP-1483738985-172.17.0.2-1732145834006/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:16,353 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/cluster_5f4fe31e-fafa-e3ea-229b-11af3830b9ec/data/data3/current/BP-1483738985-172.17.0.2-1732145834006/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:16,373 WARN [Thread-440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T23:37:16,375 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa104288766a76951 with lease ID 0xfaae56d200eb46a2: Processing first storage report for DS-23ef0335-5feb-48aa-999a-ef8e13316589 from datanode DatanodeRegistration(127.0.0.1:36311, datanodeUuid=f0d1d127-7280-4a54-90db-a6f2091084ab, infoPort=39961, infoSecurePort=0, ipcPort=38929, storageInfo=lv=-57;cid=testClusterID;nsid=353524311;c=1732145834006) 2024-11-20T23:37:16,375 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa104288766a76951 with lease ID 0xfaae56d200eb46a2: from storage DS-23ef0335-5feb-48aa-999a-ef8e13316589 node DatanodeRegistration(127.0.0.1:36311, datanodeUuid=f0d1d127-7280-4a54-90db-a6f2091084ab, infoPort=39961, infoSecurePort=0, ipcPort=38929, storageInfo=lv=-57;cid=testClusterID;nsid=353524311;c=1732145834006), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:37:16,376 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa104288766a76951 with lease ID 0xfaae56d200eb46a2: Processing first storage report for DS-fcea9997-e112-472b-ab95-e76d6e7ecb13 from datanode DatanodeRegistration(127.0.0.1:36311, datanodeUuid=f0d1d127-7280-4a54-90db-a6f2091084ab, infoPort=39961, infoSecurePort=0, ipcPort=38929, storageInfo=lv=-57;cid=testClusterID;nsid=353524311;c=1732145834006) 2024-11-20T23:37:16,376 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa104288766a76951 with lease ID 0xfaae56d200eb46a2: from storage DS-fcea9997-e112-472b-ab95-e76d6e7ecb13 node DatanodeRegistration(127.0.0.1:36311, datanodeUuid=f0d1d127-7280-4a54-90db-a6f2091084ab, infoPort=39961, infoSecurePort=0, ipcPort=38929, storageInfo=lv=-57;cid=testClusterID;nsid=353524311;c=1732145834006), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:37:16,414 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6 2024-11-20T23:37:16,418 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/cluster_5f4fe31e-fafa-e3ea-229b-11af3830b9ec/zookeeper_0, clientPort=58818, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/cluster_5f4fe31e-fafa-e3ea-229b-11af3830b9ec/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/cluster_5f4fe31e-fafa-e3ea-229b-11af3830b9ec/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T23:37:16,419 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58818 2024-11-20T23:37:16,419 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:16,421 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:16,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:37:16,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:37:16,432 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d with version=8 2024-11-20T23:37:16,432 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/hbase-staging 2024-11-20T23:37:16,435 INFO [Time-limited test {}] client.ConnectionUtils(128): master/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:37:16,435 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:16,436 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:16,436 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:37:16,436 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:16,436 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:37:16,436 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T23:37:16,436 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:37:16,437 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32803 2024-11-20T23:37:16,438 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32803 connecting to ZooKeeper ensemble=127.0.0.1:58818 2024-11-20T23:37:16,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:328030x0, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:37:16,494 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32803-0x1015a9bed2c0000 connected 2024-11-20T23:37:16,581 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:16,587 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:16,592 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:37:16,592 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d, hbase.cluster.distributed=false 2024-11-20T23:37:16,594 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:37:16,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32803 2024-11-20T23:37:16,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32803 2024-11-20T23:37:16,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32803 2024-11-20T23:37:16,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32803 2024-11-20T23:37:16,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32803 2024-11-20T23:37:16,612 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:37:16,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:16,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:16,612 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:37:16,613 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:16,613 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:37:16,613 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T23:37:16,613 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:37:16,614 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40629 2024-11-20T23:37:16,616 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40629 connecting to ZooKeeper ensemble=127.0.0.1:58818 2024-11-20T23:37:16,617 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:16,621 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:16,633 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:406290x0, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:37:16,634 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:406290x0, quorum=127.0.0.1:58818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:37:16,634 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T23:37:16,644 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40629-0x1015a9bed2c0001 connected 2024-11-20T23:37:16,645 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T23:37:16,646 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T23:37:16,647 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:37:16,648 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40629 2024-11-20T23:37:16,649 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40629 2024-11-20T23:37:16,652 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40629 2024-11-20T23:37:16,654 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40629 2024-11-20T23:37:16,655 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40629 2024-11-20T23:37:16,667 DEBUG [M:0;412a5e44fd2e:32803 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;412a5e44fd2e:32803 2024-11-20T23:37:16,668 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/412a5e44fd2e,32803,1732145836435 2024-11-20T23:37:16,675 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:37:16,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:37:16,675 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/412a5e44fd2e,32803,1732145836435 2024-11-20T23:37:16,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:16,685 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T23:37:16,685 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:16,686 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T23:37:16,686 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/412a5e44fd2e,32803,1732145836435 from backup master directory 2024-11-20T23:37:16,696 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:37:16,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/412a5e44fd2e,32803,1732145836435 2024-11-20T23:37:16,696 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-20T23:37:16,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:37:16,696 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=412a5e44fd2e,32803,1732145836435 2024-11-20T23:37:16,702 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/hbase.id] with ID: 165d1bfb-f95c-4405-9ff1-2d508092aec6 2024-11-20T23:37:16,702 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/.tmp/hbase.id 2024-11-20T23:37:16,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:37:16,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:37:16,710 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/.tmp/hbase.id]:[hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/hbase.id] 2024-11-20T23:37:16,727 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:16,727 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T23:37:16,729 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-20T23:37:16,738 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:16,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:16,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:37:16,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:37:16,747 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T23:37:16,747 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T23:37:16,748 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:37:16,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:37:16,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:37:16,758 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store 2024-11-20T23:37:16,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:37:16,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:37:16,768 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:37:16,768 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:37:16,768 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:37:16,768 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:37:16,768 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T23:37:16,768 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:37:16,768 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T23:37:16,768 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732145836768Disabling compacts and flushes for region at 1732145836768Disabling writes for close at 1732145836768Writing region close event to WAL at 1732145836768Closed at 1732145836768 2024-11-20T23:37:16,770 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/.initializing 2024-11-20T23:37:16,770 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/WALs/412a5e44fd2e,32803,1732145836435 2024-11-20T23:37:16,773 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C32803%2C1732145836435, suffix=, logDir=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/WALs/412a5e44fd2e,32803,1732145836435, archiveDir=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/oldWALs, maxLogs=10 2024-11-20T23:37:16,774 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C32803%2C1732145836435.1732145836773 2024-11-20T23:37:16,780 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/WALs/412a5e44fd2e,32803,1732145836435/412a5e44fd2e%2C32803%2C1732145836435.1732145836773 2024-11-20T23:37:16,781 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40643:40643),(127.0.0.1/127.0.0.1:39961:39961)] 2024-11-20T23:37:16,781 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:37:16,781 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:37:16,782 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:16,782 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:16,783 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:16,785 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T23:37:16,785 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:16,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:16,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:16,788 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T23:37:16,788 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:16,788 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:37:16,789 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:16,791 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T23:37:16,791 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:16,792 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:37:16,792 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:16,794 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T23:37:16,794 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:16,794 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:37:16,795 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:16,796 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:16,797 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:16,798 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:16,798 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:16,799 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T23:37:16,801 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:16,803 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:37:16,804 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=752163, jitterRate=-0.04357597231864929}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T23:37:16,805 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732145836782Initializing all the Stores at 1732145836783 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145836783Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145836783Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145836783Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145836783Cleaning up temporary data from old regions at 1732145836798 (+15 ms)Region opened successfully at 1732145836805 (+7 ms) 2024-11-20T23:37:16,806 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T23:37:16,810 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dfe2e96, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:37:16,811 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T23:37:16,812 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T23:37:16,812 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T23:37:16,812 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T23:37:16,813 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T23:37:16,813 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T23:37:16,813 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T23:37:16,816 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T23:37:16,818 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T23:37:16,829 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T23:37:16,829 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T23:37:16,830 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T23:37:16,843 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T23:37:16,844 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T23:37:16,845 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T23:37:16,853 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T23:37:16,855 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T23:37:16,864 DEBUG 
[master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T23:37:16,867 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T23:37:16,874 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T23:37:16,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:37:16,885 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:37:16,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:16,885 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:16,886 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=412a5e44fd2e,32803,1732145836435, sessionid=0x1015a9bed2c0000, setting cluster-up flag (Was=false) 2024-11-20T23:37:16,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:16,906 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:16,938 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T23:37:16,942 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,32803,1732145836435 2024-11-20T23:37:16,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:16,966 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:16,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:37:16,999 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T23:37:17,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-20T23:37:17,001 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T23:37:17,002 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,32803,1732145836435 2024-11-20T23:37:17,004 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T23:37:17,006 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T23:37:17,006 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T23:37:17,006 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-20T23:37:17,007 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 412a5e44fd2e,32803,1732145836435 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T23:37:17,008 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:37:17,009 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:37:17,009 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:37:17,009 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:37:17,009 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/412a5e44fd2e:0, corePoolSize=10, maxPoolSize=10 2024-11-20T23:37:17,009 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:17,009 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:37:17,009 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:17,010 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732145867010 2024-11-20T23:37:17,010 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T23:37:17,010 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T23:37:17,010 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T23:37:17,010 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T23:37:17,010 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T23:37:17,010 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T23:37:17,011 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:17,011 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T23:37:17,011 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T23:37:17,011 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T23:37:17,011 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:37:17,012 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T23:37:17,012 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T23:37:17,012 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T23:37:17,012 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145837012,5,FailOnTimeoutGroup] 2024-11-20T23:37:17,013 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145837012,5,FailOnTimeoutGroup] 2024-11-20T23:37:17,013 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:17,013 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T23:37:17,013 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:17,013 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-20T23:37:17,013 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:17,013 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T23:37:17,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:37:17,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:37:17,032 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T23:37:17,032 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d 2024-11-20T23:37:17,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:37:17,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:37:17,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:37:17,061 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(746): ClusterId : 165d1bfb-f95c-4405-9ff1-2d508092aec6 2024-11-20T23:37:17,061 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T23:37:17,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:37:17,066 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:37:17,067 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:17,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:17,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:37:17,070 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:37:17,070 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:17,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:17,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:37:17,073 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T23:37:17,073 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T23:37:17,074 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:37:17,074 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:17,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:17,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:37:17,077 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:37:17,077 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:17,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:17,078 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:37:17,079 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/data/hbase/meta/1588230740 2024-11-20T23:37:17,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/data/hbase/meta/1588230740 2024-11-20T23:37:17,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:37:17,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:37:17,082 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T23:37:17,086 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:37:17,091 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:37:17,092 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=829287, jitterRate=0.0544937402009964}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:37:17,093 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T23:37:17,093 DEBUG [RS:0;412a5e44fd2e:40629 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4eeaca7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:37:17,094 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732145837057Initializing all the Stores at 1732145837058 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145837058Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145837064 (+6 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145837064Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145837064Cleaning up temporary data from old regions at 1732145837081 (+17 ms)Region opened successfully at 1732145837093 (+12 ms) 2024-11-20T23:37:17,094 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T23:37:17,094 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:37:17,094 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:37:17,094 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 
ms 2024-11-20T23:37:17,094 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:37:17,113 DEBUG [RS:0;412a5e44fd2e:40629 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;412a5e44fd2e:40629 2024-11-20T23:37:17,113 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T23:37:17,113 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T23:37:17,113 DEBUG [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-20T23:37:17,114 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(2659): reportForDuty to master=412a5e44fd2e,32803,1732145836435 with port=40629, startcode=1732145836612 2024-11-20T23:37:17,114 DEBUG [RS:0;412a5e44fd2e:40629 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T23:37:17,116 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T23:37:17,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732145837094Disabling compacts and flushes for region at 1732145837094Disabling writes for close at 1732145837094Writing region close event to WAL at 1732145837116 (+22 ms)Closed at 1732145837116 2024-11-20T23:37:17,119 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:37:17,119 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T23:37:17,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T23:37:17,123 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:37:17,125 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T23:37:17,135 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48011, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T23:37:17,136 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32803 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 412a5e44fd2e,40629,1732145836612 2024-11-20T23:37:17,136 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32803 {}] master.ServerManager(517): Registering regionserver=412a5e44fd2e,40629,1732145836612 2024-11-20T23:37:17,138 DEBUG [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d 2024-11-20T23:37:17,138 DEBUG [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(1440): 
Config from master: fs.defaultFS=hdfs://localhost:34499 2024-11-20T23:37:17,139 DEBUG [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T23:37:17,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:37:17,149 DEBUG [RS:0;412a5e44fd2e:40629 {}] zookeeper.ZKUtil(111): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/412a5e44fd2e,40629,1732145836612 2024-11-20T23:37:17,149 WARN [RS:0;412a5e44fd2e:40629 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T23:37:17,149 INFO [RS:0;412a5e44fd2e:40629 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:37:17,149 DEBUG [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/WALs/412a5e44fd2e,40629,1732145836612 2024-11-20T23:37:17,151 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [412a5e44fd2e,40629,1732145836612] 2024-11-20T23:37:17,164 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T23:37:17,167 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T23:37:17,168 INFO [RS:0;412a5e44fd2e:40629 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T23:37:17,168 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:17,170 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T23:37:17,171 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T23:37:17,172 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-20T23:37:17,172 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:17,172 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:17,172 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:17,172 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:17,172 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:17,172 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:37:17,172 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:17,172 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:17,173 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:17,173 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:17,173 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:17,173 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:17,173 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:37:17,173 DEBUG [RS:0;412a5e44fd2e:40629 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:37:17,173 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:17,174 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:17,174 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:17,174 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-20T23:37:17,174 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:17,174 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,40629,1732145836612-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:37:17,195 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T23:37:17,195 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,40629,1732145836612-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:17,196 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:17,196 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.Replication(171): 412a5e44fd2e,40629,1732145836612 started 2024-11-20T23:37:17,216 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:17,216 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(1482): Serving as 412a5e44fd2e,40629,1732145836612, RpcServer on 412a5e44fd2e/172.17.0.2:40629, sessionid=0x1015a9bed2c0001 2024-11-20T23:37:17,216 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T23:37:17,216 DEBUG [RS:0;412a5e44fd2e:40629 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 412a5e44fd2e,40629,1732145836612 2024-11-20T23:37:17,216 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,40629,1732145836612' 2024-11-20T23:37:17,217 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T23:37:17,217 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T23:37:17,219 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T23:37:17,219 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T23:37:17,219 DEBUG [RS:0;412a5e44fd2e:40629 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 412a5e44fd2e,40629,1732145836612 2024-11-20T23:37:17,219 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,40629,1732145836612' 2024-11-20T23:37:17,219 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T23:37:17,220 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T23:37:17,221 DEBUG [RS:0;412a5e44fd2e:40629 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T23:37:17,221 INFO [RS:0;412a5e44fd2e:40629 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T23:37:17,221 INFO [RS:0;412a5e44fd2e:40629 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-20T23:37:17,275 WARN [412a5e44fd2e:32803 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-20T23:37:17,324 INFO [RS:0;412a5e44fd2e:40629 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C40629%2C1732145836612, suffix=, logDir=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/WALs/412a5e44fd2e,40629,1732145836612, archiveDir=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/oldWALs, maxLogs=32 2024-11-20T23:37:17,326 INFO [RS:0;412a5e44fd2e:40629 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C40629%2C1732145836612.1732145837326 2024-11-20T23:37:17,333 INFO [RS:0;412a5e44fd2e:40629 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/WALs/412a5e44fd2e,40629,1732145836612/412a5e44fd2e%2C40629%2C1732145836612.1732145837326 2024-11-20T23:37:17,335 DEBUG [RS:0;412a5e44fd2e:40629 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39961:39961),(127.0.0.1/127.0.0.1:40643:40643)] 2024-11-20T23:37:17,525 DEBUG [412a5e44fd2e:32803 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T23:37:17,526 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=412a5e44fd2e,40629,1732145836612 2024-11-20T23:37:17,528 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,40629,1732145836612, state=OPENING 2024-11-20T23:37:17,587 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T23:37:17,679 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:17,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:17,680 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:37:17,680 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:37:17,680 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,40629,1732145836612}] 2024-11-20T23:37:17,680 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:37:17,835 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T23:37:17,837 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48705, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T23:37:17,842 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T23:37:17,843 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:37:17,846 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C40629%2C1732145836612.meta, suffix=.meta, logDir=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/WALs/412a5e44fd2e,40629,1732145836612, archiveDir=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/oldWALs, maxLogs=32 2024-11-20T23:37:17,849 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C40629%2C1732145836612.meta.1732145837848.meta 2024-11-20T23:37:17,860 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/WALs/412a5e44fd2e,40629,1732145836612/412a5e44fd2e%2C40629%2C1732145836612.meta.1732145837848.meta 2024-11-20T23:37:17,867 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39961:39961),(127.0.0.1/127.0.0.1:40643:40643)] 2024-11-20T23:37:17,873 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:37:17,873 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T23:37:17,873 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T23:37:17,873 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-20T23:37:17,873 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T23:37:17,873 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:37:17,874 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T23:37:17,874 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T23:37:17,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:37:17,878 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:37:17,878 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:17,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:17,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:37:17,881 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:37:17,882 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:17,883 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:17,883 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:37:17,885 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:37:17,885 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:17,886 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:17,886 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:37:17,888 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:37:17,892 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:17,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-20T23:37:17,893 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:37:17,895 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/data/hbase/meta/1588230740 2024-11-20T23:37:17,898 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/data/hbase/meta/1588230740 2024-11-20T23:37:17,900 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:37:17,900 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:37:17,901 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T23:37:17,903 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:37:17,904 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=719092, jitterRate=-0.08562830090522766}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:37:17,905 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T23:37:17,906 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732145837874Writing region info on filesystem at 1732145837874Initializing all the Stores at 1732145837875 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145837875Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145837876 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145837876Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145837876Cleaning up temporary data from old regions at 1732145837900 (+24 ms)Running coprocessor post-open hooks at 1732145837905 (+5 ms)Region opened successfully at 1732145837906 (+1 ms) 2024-11-20T23:37:17,908 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732145837834 2024-11-20T23:37:17,911 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T23:37:17,912 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T23:37:17,913 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=412a5e44fd2e,40629,1732145836612 2024-11-20T23:37:17,915 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,40629,1732145836612, state=OPEN 2024-11-20T23:37:17,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:37:17,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:37:18,114 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:37:18,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:37:18,114 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:37:18,114 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:37:18,114 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=412a5e44fd2e,40629,1732145836612 2024-11-20T23:37:18,119 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T23:37:18,119 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,40629,1732145836612 in 434 msec 2024-11-20T23:37:18,123 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T23:37:18,123 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 999 msec 2024-11-20T23:37:18,125 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:37:18,125 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T23:37:18,127 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:37:18,127 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,40629,1732145836612, seqNum=-1] 2024-11-20T23:37:18,127 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:37:18,129 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57447, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:37:18,138 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1310 sec 2024-11-20T23:37:18,138 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732145838138, completionTime=-1 2024-11-20T23:37:18,138 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master 
is running 2024-11-20T23:37:18,138 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T23:37:18,141 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T23:37:18,141 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732145898141 2024-11-20T23:37:18,142 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732145958142 2024-11-20T23:37:18,142 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 3 msec 2024-11-20T23:37:18,142 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,32803,1732145836435-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:18,142 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,32803,1732145836435-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:18,142 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,32803,1732145836435-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:18,142 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-412a5e44fd2e:32803, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:18,143 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:18,143 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:18,145 DEBUG [master/412a5e44fd2e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T23:37:18,149 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.452sec 2024-11-20T23:37:18,149 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T23:37:18,149 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T23:37:18,149 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T23:37:18,149 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-20T23:37:18,149 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T23:37:18,149 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,32803,1732145836435-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:37:18,149 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,32803,1732145836435-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T23:37:18,152 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T23:37:18,152 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T23:37:18,152 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,32803,1732145836435-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:18,162 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bd69191, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:37:18,162 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 412a5e44fd2e,32803,-1 for getting cluster id 2024-11-20T23:37:18,162 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T23:37:18,164 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '165d1bfb-f95c-4405-9ff1-2d508092aec6' 2024-11-20T23:37:18,165 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T23:37:18,165 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "165d1bfb-f95c-4405-9ff1-2d508092aec6" 2024-11-20T23:37:18,166 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6076163e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:37:18,166 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [412a5e44fd2e,32803,-1] 2024-11-20T23:37:18,166 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T23:37:18,166 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:37:18,169 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37830, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T23:37:18,171 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e299140, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:37:18,172 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:37:18,173 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,40629,1732145836612, seqNum=-1] 2024-11-20T23:37:18,174 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:37:18,176 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35888, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:37:18,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=412a5e44fd2e,32803,1732145836435 2024-11-20T23:37:18,180 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:18,184 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T23:37:18,184 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T23:37:18,184 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T23:37:18,185 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:37:18,185 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:37:18,185 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:37:18,185 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T23:37:18,185 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T23:37:18,185 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=538851360, stopped=false 2024-11-20T23:37:18,186 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=412a5e44fd2e,32803,1732145836435 2024-11-20T23:37:18,201 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:37:18,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:37:18,201 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:37:18,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:18,201 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:18,201 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T23:37:18,201 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:37:18,202 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:37:18,202 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '412a5e44fd2e,40629,1732145836612' ***** 2024-11-20T23:37:18,202 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T23:37:18,202 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T23:37:18,202 INFO [RS:0;412a5e44fd2e:40629 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T23:37:18,202 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T23:37:18,202 INFO [RS:0;412a5e44fd2e:40629 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T23:37:18,202 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:37:18,202 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(959): stopping server 412a5e44fd2e,40629,1732145836612 2024-11-20T23:37:18,202 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:37:18,202 INFO [RS:0;412a5e44fd2e:40629 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;412a5e44fd2e:40629. 2024-11-20T23:37:18,203 DEBUG [RS:0;412a5e44fd2e:40629 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:37:18,203 DEBUG [RS:0;412a5e44fd2e:40629 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:37:18,203 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T23:37:18,203 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T23:37:18,203 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-20T23:37:18,203 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:37:18,203 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T23:37:18,203 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-20T23:37:18,203 DEBUG [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-20T23:37:18,203 DEBUG [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-20T23:37:18,204 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T23:37:18,204 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:37:18,204 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:37:18,204 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T23:37:18,204 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:37:18,204 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-20T23:37:18,226 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/data/hbase/meta/1588230740/.tmp/ns/c1209a4114a5427b98b1c73e591a06b3 is 43, key is default/ns:d/1732145838130/Put/seqid=0 2024-11-20T23:37:18,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741835_1011 (size=5153) 2024-11-20T23:37:18,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741835_1011 (size=5153) 2024-11-20T23:37:18,239 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/data/hbase/meta/1588230740/.tmp/ns/c1209a4114a5427b98b1c73e591a06b3 2024-11-20T23:37:18,251 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/data/hbase/meta/1588230740/.tmp/ns/c1209a4114a5427b98b1c73e591a06b3 as hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/data/hbase/meta/1588230740/ns/c1209a4114a5427b98b1c73e591a06b3 2024-11-20T23:37:18,265 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/data/hbase/meta/1588230740/ns/c1209a4114a5427b98b1c73e591a06b3, entries=2, sequenceid=6, filesize=5.0 K 2024-11-20T23:37:18,269 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 64ms, sequenceid=6, compaction requested=false 2024-11-20T23:37:18,269 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T23:37:18,285 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T23:37:18,286 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:37:18,286 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T23:37:18,287 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732145838203Running coprocessor pre-close hooks at 1732145838203Disabling compacts and flushes for region at 1732145838203Disabling writes for close at 1732145838204 (+1 ms)Obtaining lock to block concurrent updates at 1732145838204Preparing flush snapshotting stores in 1588230740 at 1732145838204Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732145838204Flushing stores of hbase:meta,,1.1588230740 at 1732145838205 (+1 ms)Flushing 1588230740/ns: creating writer at 1732145838205Flushing 1588230740/ns: appending metadata at 1732145838225 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1732145838225Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d06d4dc: reopening flushed file at 1732145838249 (+24 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 64ms, sequenceid=6, compaction requested=false at 1732145838269 (+20 ms)Writing region close event to WAL at 1732145838271 (+2 ms)Running coprocessor post-close hooks at 1732145838286 (+15 ms)Closed at 1732145838286 2024-11-20T23:37:18,287 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T23:37:18,404 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(976): stopping server 412a5e44fd2e,40629,1732145836612; all regions closed. 
2024-11-20T23:37:18,404 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:18,405 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:18,405 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:18,405 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:18,405 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:18,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741834_1010 (size=1152) 2024-11-20T23:37:18,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741834_1010 (size=1152) 2024-11-20T23:37:18,417 DEBUG [RS:0;412a5e44fd2e:40629 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/oldWALs 2024-11-20T23:37:18,417 INFO [RS:0;412a5e44fd2e:40629 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C40629%2C1732145836612.meta:.meta(num 1732145837848) 2024-11-20T23:37:18,418 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:18,420 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:18,420 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:18,420 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:18,421 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:18,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741833_1009 (size=93) 2024-11-20T23:37:18,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741833_1009 (size=93) 2024-11-20T23:37:18,471 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T23:37:18,473 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:37:18,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:37:18,840 DEBUG [RS:0;412a5e44fd2e:40629 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/oldWALs 2024-11-20T23:37:18,840 INFO [RS:0;412a5e44fd2e:40629 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C40629%2C1732145836612:(num 1732145837326) 2024-11-20T23:37:18,840 DEBUG [RS:0;412a5e44fd2e:40629 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:37:18,840 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:37:18,840 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:37:18,840 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.ChoreService(370): Chore service for: regionserver/412a5e44fd2e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T23:37:18,841 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:37:18,841 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T23:37:18,841 INFO [RS:0;412a5e44fd2e:40629 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40629 2024-11-20T23:37:18,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:37:18,896 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/412a5e44fd2e,40629,1732145836612 2024-11-20T23:37:18,896 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:37:18,897 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [412a5e44fd2e,40629,1732145836612] 2024-11-20T23:37:18,917 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/412a5e44fd2e,40629,1732145836612 already deleted, retry=false 2024-11-20T23:37:18,917 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 412a5e44fd2e,40629,1732145836612 expired; onlineServers=0 2024-11-20T23:37:18,917 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '412a5e44fd2e,32803,1732145836435' ***** 2024-11-20T23:37:18,917 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T23:37:18,917 INFO [M:0;412a5e44fd2e:32803 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:37:18,917 INFO [M:0;412a5e44fd2e:32803 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:37:18,917 DEBUG [M:0;412a5e44fd2e:32803 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T23:37:18,917 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T23:37:18,917 DEBUG [M:0;412a5e44fd2e:32803 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T23:37:18,917 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145837012 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145837012,5,FailOnTimeoutGroup] 2024-11-20T23:37:18,917 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145837012 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145837012,5,FailOnTimeoutGroup] 2024-11-20T23:37:18,918 INFO [M:0;412a5e44fd2e:32803 {}] hbase.ChoreService(370): Chore service for: master/412a5e44fd2e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T23:37:18,918 INFO [M:0;412a5e44fd2e:32803 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:37:18,918 DEBUG [M:0;412a5e44fd2e:32803 {}] master.HMaster(1795): Stopping service threads 2024-11-20T23:37:18,918 INFO [M:0;412a5e44fd2e:32803 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T23:37:18,918 INFO [M:0;412a5e44fd2e:32803 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:37:18,918 INFO [M:0;412a5e44fd2e:32803 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T23:37:18,918 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T23:37:18,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T23:37:18,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:18,927 DEBUG [M:0;412a5e44fd2e:32803 {}] zookeeper.ZKUtil(347): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T23:37:18,928 WARN [M:0;412a5e44fd2e:32803 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T23:37:18,928 INFO [M:0;412a5e44fd2e:32803 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/.lastflushedseqids 2024-11-20T23:37:18,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741836_1012 (size=99) 2024-11-20T23:37:18,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741836_1012 (size=99) 2024-11-20T23:37:18,939 INFO [M:0;412a5e44fd2e:32803 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T23:37:18,940 INFO [M:0;412a5e44fd2e:32803 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T23:37:18,940 DEBUG [M:0;412a5e44fd2e:32803 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:37:18,940 INFO [M:0;412a5e44fd2e:32803 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:37:18,940 DEBUG [M:0;412a5e44fd2e:32803 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:37:18,940 DEBUG [M:0;412a5e44fd2e:32803 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T23:37:18,940 DEBUG [M:0;412a5e44fd2e:32803 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:37:18,940 INFO [M:0;412a5e44fd2e:32803 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-20T23:37:18,960 DEBUG [M:0;412a5e44fd2e:32803 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b29a6aa51f1a41e29a1c26f532607701 is 82, key is hbase:meta,,1/info:regioninfo/1732145837913/Put/seqid=0 2024-11-20T23:37:18,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741837_1013 (size=5672) 2024-11-20T23:37:18,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741837_1013 (size=5672) 2024-11-20T23:37:18,967 INFO [M:0;412a5e44fd2e:32803 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b29a6aa51f1a41e29a1c26f532607701 2024-11-20T23:37:18,997 DEBUG [M:0;412a5e44fd2e:32803 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3239b78cf4b646bf95f56a3e5deb7c1b is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732145838137/Put/seqid=0 2024-11-20T23:37:19,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741838_1014 (size=5275) 2024-11-20T23:37:19,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741838_1014 (size=5275) 2024-11-20T23:37:19,006 INFO [RS:0;412a5e44fd2e:40629 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:37:19,006 INFO [RS:0;412a5e44fd2e:40629 {}] regionserver.HRegionServer(1031): Exiting; stopping=412a5e44fd2e,40629,1732145836612; zookeeper connection closed. 
2024-11-20T23:37:19,007 INFO [M:0;412a5e44fd2e:32803 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3239b78cf4b646bf95f56a3e5deb7c1b 2024-11-20T23:37:19,007 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:37:19,007 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40629-0x1015a9bed2c0001, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:37:19,008 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3261a6ac {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3261a6ac 2024-11-20T23:37:19,008 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T23:37:19,034 DEBUG [M:0;412a5e44fd2e:32803 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/34e559264bd944d89089209cdc63788f is 69, key is 412a5e44fd2e,40629,1732145836612/rs:state/1732145837136/Put/seqid=0 2024-11-20T23:37:19,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741839_1015 (size=5156) 2024-11-20T23:37:19,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741839_1015 (size=5156) 2024-11-20T23:37:19,044 INFO [M:0;412a5e44fd2e:32803 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/34e559264bd944d89089209cdc63788f 2024-11-20T23:37:19,067 DEBUG [M:0;412a5e44fd2e:32803 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c62cd2bb6a3b4bb99a318c4ee26a68f4 is 52, key is load_balancer_on/state:d/1732145838183/Put/seqid=0 2024-11-20T23:37:19,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741840_1016 (size=5056) 2024-11-20T23:37:19,075 INFO [M:0;412a5e44fd2e:32803 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c62cd2bb6a3b4bb99a318c4ee26a68f4 2024-11-20T23:37:19,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741840_1016 (size=5056) 2024-11-20T23:37:19,083 DEBUG [M:0;412a5e44fd2e:32803 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b29a6aa51f1a41e29a1c26f532607701 as hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b29a6aa51f1a41e29a1c26f532607701 2024-11-20T23:37:19,090 INFO [M:0;412a5e44fd2e:32803 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b29a6aa51f1a41e29a1c26f532607701, entries=8, sequenceid=29, filesize=5.5 K 2024-11-20T23:37:19,092 DEBUG [M:0;412a5e44fd2e:32803 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3239b78cf4b646bf95f56a3e5deb7c1b as hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3239b78cf4b646bf95f56a3e5deb7c1b 2024-11-20T23:37:19,099 INFO [M:0;412a5e44fd2e:32803 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3239b78cf4b646bf95f56a3e5deb7c1b, entries=3, sequenceid=29, filesize=5.2 K 2024-11-20T23:37:19,103 DEBUG [M:0;412a5e44fd2e:32803 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/34e559264bd944d89089209cdc63788f as hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/34e559264bd944d89089209cdc63788f 2024-11-20T23:37:19,113 INFO [M:0;412a5e44fd2e:32803 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/34e559264bd944d89089209cdc63788f, entries=1, sequenceid=29, filesize=5.0 K 2024-11-20T23:37:19,115 DEBUG [M:0;412a5e44fd2e:32803 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c62cd2bb6a3b4bb99a318c4ee26a68f4 as hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c62cd2bb6a3b4bb99a318c4ee26a68f4 2024-11-20T23:37:19,123 INFO [M:0;412a5e44fd2e:32803 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34499/user/jenkins/test-data/afece5bd-931f-8b78-ff85-f2ff375d280d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c62cd2bb6a3b4bb99a318c4ee26a68f4, entries=1, sequenceid=29, filesize=4.9 K 2024-11-20T23:37:19,125 INFO [M:0;412a5e44fd2e:32803 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 185ms, sequenceid=29, compaction requested=false 2024-11-20T23:37:19,129 INFO [M:0;412a5e44fd2e:32803 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T23:37:19,129 DEBUG [M:0;412a5e44fd2e:32803 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732145838940Disabling compacts and flushes for region at 1732145838940Disabling writes for close at 1732145838940Obtaining lock to block concurrent updates at 1732145838940Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732145838940Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732145838941 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732145838942 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732145838942Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732145838959 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732145838959Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732145838975 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732145838997 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732145838997Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732145839013 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732145839033 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732145839033Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732145839050 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732145839066 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732145839066Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1769a1c3: reopening flushed file at 1732145839081 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f3e60fb: reopening flushed file at 1732145839091 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b9528c2: reopening flushed file at 1732145839100 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b396e17: reopening flushed file at 1732145839113 (+13 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 185ms, sequenceid=29, compaction requested=false at 1732145839125 (+12 ms)Writing region close event to WAL at 1732145839129 (+4 ms)Closed at 1732145839129 2024-11-20T23:37:19,132 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:19,132 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:19,132 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:19,132 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:19,133 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:19,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36311 is added to blk_1073741830_1006 (size=10311) 2024-11-20T23:37:19,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38693 is added to blk_1073741830_1006 (size=10311) 2024-11-20T23:37:19,137 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T23:37:19,137 INFO [M:0;412a5e44fd2e:32803 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T23:37:19,137 INFO [M:0;412a5e44fd2e:32803 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32803 2024-11-20T23:37:19,138 INFO [M:0;412a5e44fd2e:32803 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:37:19,175 INFO [regionserver/412a5e44fd2e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:37:19,248 INFO [M:0;412a5e44fd2e:32803 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:37:19,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:37:19,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x1015a9bed2c0000, quorum=127.0.0.1:58818, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:37:19,252 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b5e52bc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:19,252 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@882842c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:37:19,252 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:37:19,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@573af0f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:37:19,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d5e070a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/hadoop.log.dir/,STOPPED} 2024-11-20T23:37:19,258 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T23:37:19,258 WARN [BP-1483738985-172.17.0.2-1732145834006 heartbeating to localhost/127.0.0.1:34499 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:37:19,258 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:37:19,258 WARN [BP-1483738985-172.17.0.2-1732145834006 heartbeating to localhost/127.0.0.1:34499 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1483738985-172.17.0.2-1732145834006 (Datanode Uuid f0d1d127-7280-4a54-90db-a6f2091084ab) service to localhost/127.0.0.1:34499 2024-11-20T23:37:19,258 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/cluster_5f4fe31e-fafa-e3ea-229b-11af3830b9ec/data/data3/current/BP-1483738985-172.17.0.2-1732145834006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:19,259 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/cluster_5f4fe31e-fafa-e3ea-229b-11af3830b9ec/data/data4/current/BP-1483738985-172.17.0.2-1732145834006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:19,259 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:37:19,267 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2cd60cfb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:19,268 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f88e14b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:37:19,268 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:37:19,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e7873b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:37:19,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa9c156{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/hadoop.log.dir/,STOPPED} 2024-11-20T23:37:19,276 WARN [BP-1483738985-172.17.0.2-1732145834006 heartbeating to localhost/127.0.0.1:34499 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:37:19,276 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T23:37:19,276 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:37:19,276 WARN [BP-1483738985-172.17.0.2-1732145834006 heartbeating to localhost/127.0.0.1:34499 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1483738985-172.17.0.2-1732145834006 (Datanode Uuid ad735f0b-d537-4435-9567-d0c8ca680beb) service to localhost/127.0.0.1:34499 2024-11-20T23:37:19,277 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/cluster_5f4fe31e-fafa-e3ea-229b-11af3830b9ec/data/data1/current/BP-1483738985-172.17.0.2-1732145834006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:19,277 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/cluster_5f4fe31e-fafa-e3ea-229b-11af3830b9ec/data/data2/current/BP-1483738985-172.17.0.2-1732145834006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:19,278 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:37:19,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ce0de36{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T23:37:19,285 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3614f662{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:37:19,285 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:37:19,285 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a15ed6a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:37:19,285 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@641eaf99{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/hadoop.log.dir/,STOPPED} 2024-11-20T23:37:19,294 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T23:37:19,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T23:37:19,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T23:37:19,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/hadoop.log.dir so I do NOT create it in target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0 2024-11-20T23:37:19,319 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ce1a6143-b60f-c16f-afe7-38acee980fe6/hadoop.tmp.dir so I do NOT create it in target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0 2024-11-20T23:37:19,319 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428, deleteOnExit=true 2024-11-20T23:37:19,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T23:37:19,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/test.cache.data in system properties and HBase conf 2024-11-20T23:37:19,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T23:37:19,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir in system properties and HBase conf 2024-11-20T23:37:19,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T23:37:19,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T23:37:19,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T23:37:19,320 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T23:37:19,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:37:19,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:37:19,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T23:37:19,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:37:19,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T23:37:19,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T23:37:19,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:37:19,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:37:19,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T23:37:19,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/nfs.dump.dir in system properties and HBase conf 2024-11-20T23:37:19,322 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/java.io.tmpdir in system properties and HBase conf 2024-11-20T23:37:19,322 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:37:19,322 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T23:37:19,322 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T23:37:19,341 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:37:19,661 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:37:19,669 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:37:19,670 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:37:19,670 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:37:19,670 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T23:37:19,671 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:37:19,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ab5393f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:37:19,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ac253d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:37:19,784 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2295376c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/java.io.tmpdir/jetty-localhost-42873-hadoop-hdfs-3_4_1-tests_jar-_-any-18316370753651444887/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T23:37:19,787 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54adbc26{HTTP/1.1, (http/1.1)}{localhost:42873} 2024-11-20T23:37:19,787 INFO [Time-limited test {}] server.Server(415): Started @109987ms 2024-11-20T23:37:19,807 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:37:20,331 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:37:20,335 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:37:20,336 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:37:20,337 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:37:20,337 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:37:20,337 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@372d60ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:37:20,337 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a18c5e6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:37:20,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bba803f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/java.io.tmpdir/jetty-localhost-46379-hadoop-hdfs-3_4_1-tests_jar-_-any-16372456694525274249/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:20,436 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7629a449{HTTP/1.1, (http/1.1)}{localhost:46379} 2024-11-20T23:37:20,436 INFO [Time-limited test {}] server.Server(415): Started @110636ms 2024-11-20T23:37:20,438 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:37:20,508 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:37:20,516 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:37:20,520 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:37:20,520 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:37:20,520 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T23:37:20,521 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@217a95d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:37:20,522 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c64d82b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:37:20,623 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3efce601{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/java.io.tmpdir/jetty-localhost-35935-hadoop-hdfs-3_4_1-tests_jar-_-any-10558532095246937298/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:20,623 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e79a3d4{HTTP/1.1, (http/1.1)}{localhost:35935} 2024-11-20T23:37:20,623 INFO [Time-limited test {}] server.Server(415): Started @110823ms 2024-11-20T23:37:20,625 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:37:21,670 WARN [Thread-674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data2/current/BP-373955406-172.17.0.2-1732145839356/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:21,670 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data1/current/BP-373955406-172.17.0.2-1732145839356/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:21,688 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T23:37:21,691 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xee23dc8d081cd626 with lease ID 0xab260f04e4f9a54: Processing first storage report for DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6 from datanode DatanodeRegistration(127.0.0.1:38187, datanodeUuid=c0de83bb-f060-489d-ae10-40822b56955e, infoPort=40131, infoSecurePort=0, ipcPort=44903, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356) 2024-11-20T23:37:21,691 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xee23dc8d081cd626 with lease ID 0xab260f04e4f9a54: from storage DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6 node DatanodeRegistration(127.0.0.1:38187, datanodeUuid=c0de83bb-f060-489d-ae10-40822b56955e, infoPort=40131, infoSecurePort=0, ipcPort=44903, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T23:37:21,691 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xee23dc8d081cd626 with lease ID 0xab260f04e4f9a54: Processing first storage report for DS-4986e3d5-a493-4335-a549-b2f46c19d38e from datanode DatanodeRegistration(127.0.0.1:38187, datanodeUuid=c0de83bb-f060-489d-ae10-40822b56955e, infoPort=40131, infoSecurePort=0, ipcPort=44903, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356) 2024-11-20T23:37:21,691 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xee23dc8d081cd626 with lease ID 0xab260f04e4f9a54: from storage DS-4986e3d5-a493-4335-a549-b2f46c19d38e node DatanodeRegistration(127.0.0.1:38187, datanodeUuid=c0de83bb-f060-489d-ae10-40822b56955e, infoPort=40131, infoSecurePort=0, ipcPort=44903, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:37:21,805 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data3/current/BP-373955406-172.17.0.2-1732145839356/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:21,805 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data4/current/BP-373955406-172.17.0.2-1732145839356/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:21,823 WARN [Thread-660 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T23:37:21,825 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc163c7ee99650287 with lease ID 0xab260f04e4f9a55: Processing first storage report for DS-d630948f-c72f-4cdf-9283-16cc93b6fd03 from datanode DatanodeRegistration(127.0.0.1:45219, datanodeUuid=1092f554-8ebb-4f08-bb75-2db829448461, infoPort=34067, infoSecurePort=0, ipcPort=41729, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356) 2024-11-20T23:37:21,825 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc163c7ee99650287 with lease ID 0xab260f04e4f9a55: from storage DS-d630948f-c72f-4cdf-9283-16cc93b6fd03 node DatanodeRegistration(127.0.0.1:45219, datanodeUuid=1092f554-8ebb-4f08-bb75-2db829448461, infoPort=34067, infoSecurePort=0, ipcPort=41729, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:37:21,825 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc163c7ee99650287 with lease ID 0xab260f04e4f9a55: Processing first storage report for DS-47b2b73c-4eea-472e-8ed0-8595a5d752de from datanode DatanodeRegistration(127.0.0.1:45219, datanodeUuid=1092f554-8ebb-4f08-bb75-2db829448461, infoPort=34067, infoSecurePort=0, ipcPort=41729, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356) 2024-11-20T23:37:21,825 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc163c7ee99650287 with lease ID 0xab260f04e4f9a55: from storage DS-47b2b73c-4eea-472e-8ed0-8595a5d752de node DatanodeRegistration(127.0.0.1:45219, datanodeUuid=1092f554-8ebb-4f08-bb75-2db829448461, infoPort=34067, infoSecurePort=0, ipcPort=41729, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:37:21,868 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0 2024-11-20T23:37:21,872 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/zookeeper_0, clientPort=51729, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T23:37:21,873 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51729 2024-11-20T23:37:21,873 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:21,875 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:21,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38187 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:37:21,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:37:21,888 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1 with version=8 2024-11-20T23:37:21,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/hbase-staging 2024-11-20T23:37:21,890 INFO [Time-limited test {}] client.ConnectionUtils(128): master/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:37:21,890 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:21,890 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:21,890 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:37:21,891 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:21,891 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:37:21,891 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T23:37:21,891 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:37:21,891 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37185 2024-11-20T23:37:21,893 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37185 connecting to ZooKeeper ensemble=127.0.0.1:51729 2024-11-20T23:37:21,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:371850x0, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:37:21,971 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37185-0x1015a9c027a0000 connected 2024-11-20T23:37:22,064 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:22,066 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:22,069 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:37:22,070 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1, hbase.cluster.distributed=false 2024-11-20T23:37:22,071 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:37:22,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37185 2024-11-20T23:37:22,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37185 2024-11-20T23:37:22,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37185 2024-11-20T23:37:22,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37185 2024-11-20T23:37:22,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37185 2024-11-20T23:37:22,088 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:37:22,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:22,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:22,088 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:37:22,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:22,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:37:22,088 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T23:37:22,089 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:37:22,089 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40151 2024-11-20T23:37:22,091 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40151 connecting to ZooKeeper ensemble=127.0.0.1:51729 2024-11-20T23:37:22,092 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:22,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:22,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:401510x0, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:37:22,106 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:37:22,106 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40151-0x1015a9c027a0001 connected 2024-11-20T23:37:22,107 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T23:37:22,108 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T23:37:22,108 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T23:37:22,110 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:37:22,110 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40151 2024-11-20T23:37:22,110 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40151 2024-11-20T23:37:22,111 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40151 2024-11-20T23:37:22,111 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40151 2024-11-20T23:37:22,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40151 2024-11-20T23:37:22,131 DEBUG [M:0;412a5e44fd2e:37185 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;412a5e44fd2e:37185 2024-11-20T23:37:22,131 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/412a5e44fd2e,37185,1732145841890 2024-11-20T23:37:22,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:37:22,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:37:22,145 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/412a5e44fd2e,37185,1732145841890 2024-11-20T23:37:22,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T23:37:22,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:22,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:22,156 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T23:37:22,156 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/412a5e44fd2e,37185,1732145841890 from backup master directory 2024-11-20T23:37:22,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/412a5e44fd2e,37185,1732145841890 2024-11-20T23:37:22,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:37:22,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:37:22,169 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
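The shutdown at 23:37:19,318 ("Minicluster is down") and the restart traced above are driven by HBaseTestingUtil with the StartMiniClusterOption printed in the log (1 master, 1 region server, 2 datanodes, 1 ZK server). A minimal sketch of how a test typically wraps that lifecycle follows; the option-builder method names, the "demo" table and the "f" family are assumptions based on recent HBase test utilities, not values taken from this run.

    import org.apache.hadoop.hbase.HBaseTestingUtil;      // hbase-testing-util, branch-3 naming
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors the option dump above: 1 master, 1 RS, 2 datanodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);        // brings up DFS, ZK, master and RS, as traced above
        try (Table table = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("f"))) {
          // "demo"/"f" are placeholder names, not from this log.
          table.put(new Put(Bytes.toBytes("r1"))
              .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
        } finally {
          util.shutdownMiniCluster();         // produces the "Minicluster is down" line seen above
        }
      }
    }

In this run the utility is started and stopped per test, which is why the same DFS/ZooKeeper/Jetty startup banner repeats throughout the log.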
2024-11-20T23:37:22,170 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=412a5e44fd2e,37185,1732145841890 2024-11-20T23:37:22,178 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/hbase.id] with ID: 31458b4c-5282-425c-978f-61c7ee6a57b4 2024-11-20T23:37:22,178 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/.tmp/hbase.id 2024-11-20T23:37:22,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:37:22,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38187 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:37:22,186 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/.tmp/hbase.id]:[hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/hbase.id] 2024-11-20T23:37:22,201 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:22,201 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T23:37:22,203 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
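The three FSUtils lines above write the new cluster ID to .tmp/hbase.id and then move it onto hbase.id, the usual write-to-a-temporary-file-then-rename pattern for publishing a file all at once on HDFS. A minimal sketch of that pattern with the stock Hadoop FileSystem API follows; the fs.defaultFS value and the paths are placeholders, not the ones from this run, and the payload here is just a random UUID rather than the real cluster-ID encoding.

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:8020");   // placeholder NameNode address
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/demo/.tmp/hbase.id");          // placeholder temporary location
        Path target = new Path("/demo/hbase.id");            // placeholder final location

        // Write the ID to the temporary file first ...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // ... then publish it with a single rename, so readers never see a half-written file.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
        }
      }
    }

The two addStoredBlock lines in between show the temporary file's single block (size=42) being reported by both datanodes before the rename lands.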
2024-11-20T23:37:22,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:22,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:22,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38187 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:37:22,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:37:22,222 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T23:37:22,223 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T23:37:22,223 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:37:22,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38187 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:37:22,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:37:22,231 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store 2024-11-20T23:37:22,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38187 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:37:22,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:37:22,240 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:37:22,241 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:37:22,241 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:37:22,241 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:37:22,241 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T23:37:22,241 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:37:22,241 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
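The master:store descriptor printed above (four families, with 'info' kept in memory at 8 KB blocks, ROW_INDEX_V1 encoding, a ROWCOL bloom filter and three versions) is the same shape of schema the public TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API expresses. A minimal sketch reproducing just the 'info' family under those settings is below; the 'demo' table name is a placeholder, and master:store itself is an internal master region that tests never create this way.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreSchemaSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes from the descriptor above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build();

        // 'demo' is a placeholder table name; master:store belongs to the master itself.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(info)
            .build();

        System.out.println(td);   // prints a descriptor similar in shape to the log line above
      }
    }

The other three families (proc, rs, state) in the log differ only in version count, encoding, bloom type and block size, so they would be built the same way with those values swapped in.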
2024-11-20T23:37:22,241 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732145842241Disabling compacts and flushes for region at 1732145842241Disabling writes for close at 1732145842241Writing region close event to WAL at 1732145842241Closed at 1732145842241 2024-11-20T23:37:22,242 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/.initializing 2024-11-20T23:37:22,242 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/WALs/412a5e44fd2e,37185,1732145841890 2024-11-20T23:37:22,245 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C37185%2C1732145841890, suffix=, logDir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/WALs/412a5e44fd2e,37185,1732145841890, archiveDir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/oldWALs, maxLogs=10 2024-11-20T23:37:22,246 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C37185%2C1732145841890.1732145842246 2024-11-20T23:37:22,251 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/WALs/412a5e44fd2e,37185,1732145841890/412a5e44fd2e%2C37185%2C1732145841890.1732145842246 2024-11-20T23:37:22,252 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34067:34067),(127.0.0.1/127.0.0.1:40131:40131)] 2024-11-20T23:37:22,253 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:37:22,253 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:37:22,253 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:22,253 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:22,255 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:22,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T23:37:22,257 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:22,257 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:22,257 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:22,259 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T23:37:22,259 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:22,259 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:37:22,259 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:22,261 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T23:37:22,261 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:22,261 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:37:22,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:22,263 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T23:37:22,263 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:22,264 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:37:22,264 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:22,265 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:22,265 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:22,267 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:22,267 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:22,268 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T23:37:22,269 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:37:22,272 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:37:22,272 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871904, jitterRate=0.10868440568447113}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T23:37:22,273 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732145842254Initializing all the Stores at 1732145842255 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145842255Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145842255Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145842255Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145842255Cleaning up temporary data from old regions at 1732145842267 (+12 ms)Region opened successfully at 1732145842273 (+6 ms) 2024-11-20T23:37:22,274 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T23:37:22,277 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73766fa8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:37:22,278 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T23:37:22,278 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T23:37:22,278 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T23:37:22,279 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T23:37:22,279 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T23:37:22,280 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T23:37:22,280 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T23:37:22,282 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T23:37:22,283 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T23:37:22,295 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T23:37:22,296 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T23:37:22,297 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T23:37:22,306 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T23:37:22,306 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T23:37:22,307 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T23:37:22,316 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T23:37:22,318 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T23:37:22,327 DEBUG 
[master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T23:37:22,332 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T23:37:22,344 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T23:37:22,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:37:22,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:37:22,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:22,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:22,355 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=412a5e44fd2e,37185,1732145841890, sessionid=0x1015a9c027a0000, setting cluster-up flag (Was=false) 2024-11-20T23:37:22,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:22,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:22,407 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T23:37:22,409 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,37185,1732145841890 2024-11-20T23:37:22,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:22,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:22,464 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T23:37:22,466 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,37185,1732145841890 2024-11-20T23:37:22,468 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T23:37:22,471 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T23:37:22,471 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T23:37:22,472 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T23:37:22,472 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 412a5e44fd2e,37185,1732145841890 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T23:37:22,474 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:37:22,474 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:37:22,474 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:37:22,474 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:37:22,474 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/412a5e44fd2e:0, corePoolSize=10, maxPoolSize=10 2024-11-20T23:37:22,474 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:22,475 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:37:22,475 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T23:37:22,476 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732145872476 2024-11-20T23:37:22,476 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T23:37:22,476 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T23:37:22,476 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T23:37:22,476 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T23:37:22,476 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T23:37:22,476 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T23:37:22,477 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:22,477 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T23:37:22,477 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:37:22,477 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T23:37:22,477 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T23:37:22,477 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T23:37:22,478 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T23:37:22,478 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T23:37:22,478 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145842478,5,FailOnTimeoutGroup] 2024-11-20T23:37:22,479 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145842478,5,FailOnTimeoutGroup] 2024-11-20T23:37:22,479 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:22,479 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T23:37:22,479 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:22,479 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:22,479 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:22,479 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T23:37:22,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:37:22,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38187 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:37:22,489 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T23:37:22,489 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1 2024-11-20T23:37:22,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38187 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:37:22,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:37:22,498 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:37:22,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:37:22,500 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:37:22,501 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:22,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:22,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:37:22,503 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:37:22,503 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:22,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:22,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:37:22,505 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:37:22,505 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:22,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:22,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:37:22,507 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:37:22,507 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:22,507 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:22,507 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:37:22,508 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740 2024-11-20T23:37:22,508 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740 2024-11-20T23:37:22,510 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:37:22,510 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:37:22,510 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
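
The FlushLargeStoresPolicy(65) entries above, for the master:store region earlier and for hbase:meta here, describe a fallback: because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, the per-family flush lower bound is taken as the region's memstore flush heap size divided by the number of families, which is how hbase:meta (four families) ends up with flushSizeLowerBound=16777216 (16 MB) and master:store with 33554432 (32 MB). As a rough illustration only, the sketch below sets that per-table key explicitly on a hypothetical user table through the ordinary client API; the key name is copied from the log, while the table name and the 8 MB value are invented for the example, and nothing here is code from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SetPerFamilyFlushLowerBound {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes an hbase-site.xml for the target cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("example_table"); // hypothetical table
          TableDescriptor current = admin.getDescriptor(table);
          // Set the per-table lower bound explicitly (8 MB here) instead of relying on
          // the flushHeapSize / numFamilies fallback reported in the log above.
          TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
              .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                        String.valueOf(8L * 1024 * 1024))
              .build();
          admin.modifyTable(updated);
        }
      }
    }
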
2024-11-20T23:37:22,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:37:22,514 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(746): ClusterId : 31458b4c-5282-425c-978f-61c7ee6a57b4 2024-11-20T23:37:22,514 DEBUG [RS:0;412a5e44fd2e:40151 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T23:37:22,514 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:37:22,514 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=853599, jitterRate=0.08540831506252289}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:37:22,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732145842498Initializing all the Stores at 1732145842499 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145842499Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145842499Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145842499Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145842499Cleaning up temporary data from old regions at 1732145842510 (+11 ms)Region opened successfully at 1732145842516 (+6 ms) 2024-11-20T23:37:22,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T23:37:22,516 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:37:22,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:37:22,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T23:37:22,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:37:22,517 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-20T23:37:22,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732145842516Disabling compacts and flushes for region at 1732145842516Disabling writes for close at 1732145842516Writing region close event to WAL at 1732145842517 (+1 ms)Closed at 1732145842517 2024-11-20T23:37:22,518 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:37:22,518 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T23:37:22,518 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T23:37:22,520 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:37:22,521 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T23:37:22,524 DEBUG [RS:0;412a5e44fd2e:40151 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T23:37:22,524 DEBUG [RS:0;412a5e44fd2e:40151 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T23:37:22,535 DEBUG [RS:0;412a5e44fd2e:40151 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T23:37:22,536 DEBUG [RS:0;412a5e44fd2e:40151 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4752ab2d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:37:22,557 DEBUG [RS:0;412a5e44fd2e:40151 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;412a5e44fd2e:40151 2024-11-20T23:37:22,557 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T23:37:22,557 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T23:37:22,557 DEBUG [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(832): About to register with Master. 
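
At this point the master has bootstrapped and closed the meta region and queued pid=2, the TransitRegionStateProcedure ASSIGN for 1588230740, while the region server is only now about to report in; until a server is available the assignment sits as a region in transition (the later "No servers available; cannot place 1 unassigned regions" warning is the same situation). A client-side way to watch regions in transition is sketched below, assuming a Configuration pointing at this cluster; the option and accessor names come from the public Admin/ClusterMetrics API and are not taken from this log, so treat this as a sketch.

    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.master.RegionState;

    public class ShowRegionsInTransition {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes an hbase-site.xml for the target cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics =
              admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.REGIONS_IN_TRANSITION));
          for (RegionState rs : metrics.getRegionStatesInTransition()) {
            // For the window covered by the log above this would list hbase:meta
            // until the OpenRegionProcedure (pid=3) completes.
            System.out.println(rs);
          }
        }
      }
    }
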
2024-11-20T23:37:22,558 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(2659): reportForDuty to master=412a5e44fd2e,37185,1732145841890 with port=40151, startcode=1732145842087 2024-11-20T23:37:22,558 DEBUG [RS:0;412a5e44fd2e:40151 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T23:37:22,560 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55625, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T23:37:22,561 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37185 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 412a5e44fd2e,40151,1732145842087 2024-11-20T23:37:22,561 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37185 {}] master.ServerManager(517): Registering regionserver=412a5e44fd2e,40151,1732145842087 2024-11-20T23:37:22,563 DEBUG [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1 2024-11-20T23:37:22,563 DEBUG [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44951 2024-11-20T23:37:22,563 DEBUG [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T23:37:22,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:37:22,576 DEBUG [RS:0;412a5e44fd2e:40151 {}] zookeeper.ZKUtil(111): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/412a5e44fd2e,40151,1732145842087 2024-11-20T23:37:22,576 WARN [RS:0;412a5e44fd2e:40151 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T23:37:22,577 INFO [RS:0;412a5e44fd2e:40151 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:37:22,577 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [412a5e44fd2e,40151,1732145842087] 2024-11-20T23:37:22,577 DEBUG [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087 2024-11-20T23:37:22,581 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T23:37:22,584 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T23:37:22,584 INFO [RS:0;412a5e44fd2e:40151 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T23:37:22,584 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
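
The ServerManager(363)/(517) entries above are the master checking and registering 412a5e44fd2e,40151,1732145842087 after its reportForDuty. A minimal client-side confirmation that the server is now counted as live is sketched below; the quorum host and client port (127.0.0.1:51729) are copied from the log, the rest is plain public client API and not code from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListLiveRegionServers {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // from the log
        conf.set("hbase.zookeeper.property.clientPort", "51729"); // from the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          // After the registration above this should include 412a5e44fd2e,40151,1732145842087.
          for (ServerName sn : metrics.getLiveServerMetrics().keySet()) {
            System.out.println(sn);
          }
        }
      }
    }
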
2024-11-20T23:37:22,585 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T23:37:22,586 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T23:37:22,586 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:22,586 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:22,586 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:22,586 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:22,586 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:22,586 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:22,586 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:37:22,586 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:22,586 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:22,586 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:22,587 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:22,587 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:22,587 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:22,587 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:37:22,587 DEBUG [RS:0;412a5e44fd2e:40151 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:37:22,587 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-20T23:37:22,587 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:22,587 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:22,587 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:22,587 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:22,588 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,40151,1732145842087-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:37:22,603 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T23:37:22,603 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,40151,1732145842087-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:22,603 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:22,604 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.Replication(171): 412a5e44fd2e,40151,1732145842087 started 2024-11-20T23:37:22,618 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:22,618 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(1482): Serving as 412a5e44fd2e,40151,1732145842087, RpcServer on 412a5e44fd2e/172.17.0.2:40151, sessionid=0x1015a9c027a0001 2024-11-20T23:37:22,618 DEBUG [RS:0;412a5e44fd2e:40151 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T23:37:22,618 DEBUG [RS:0;412a5e44fd2e:40151 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 412a5e44fd2e,40151,1732145842087 2024-11-20T23:37:22,618 DEBUG [RS:0;412a5e44fd2e:40151 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,40151,1732145842087' 2024-11-20T23:37:22,618 DEBUG [RS:0;412a5e44fd2e:40151 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T23:37:22,619 DEBUG [RS:0;412a5e44fd2e:40151 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T23:37:22,620 DEBUG [RS:0;412a5e44fd2e:40151 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T23:37:22,620 DEBUG [RS:0;412a5e44fd2e:40151 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T23:37:22,620 DEBUG [RS:0;412a5e44fd2e:40151 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 412a5e44fd2e,40151,1732145842087 2024-11-20T23:37:22,620 DEBUG [RS:0;412a5e44fd2e:40151 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,40151,1732145842087' 2024-11-20T23:37:22,620 DEBUG [RS:0;412a5e44fd2e:40151 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T23:37:22,620 DEBUG 
[RS:0;412a5e44fd2e:40151 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T23:37:22,621 DEBUG [RS:0;412a5e44fd2e:40151 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T23:37:22,621 INFO [RS:0;412a5e44fd2e:40151 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T23:37:22,621 INFO [RS:0;412a5e44fd2e:40151 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T23:37:22,672 WARN [412a5e44fd2e:37185 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-20T23:37:22,723 INFO [RS:0;412a5e44fd2e:40151 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C40151%2C1732145842087, suffix=, logDir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087, archiveDir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/oldWALs, maxLogs=32 2024-11-20T23:37:22,724 INFO [RS:0;412a5e44fd2e:40151 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C40151%2C1732145842087.1732145842724 2024-11-20T23:37:22,731 INFO [RS:0;412a5e44fd2e:40151 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 2024-11-20T23:37:22,732 DEBUG [RS:0;412a5e44fd2e:40151 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34067:34067),(127.0.0.1/127.0.0.1:40131:40131)] 2024-11-20T23:37:22,922 DEBUG [412a5e44fd2e:37185 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T23:37:22,923 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=412a5e44fd2e,40151,1732145842087 2024-11-20T23:37:22,924 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,40151,1732145842087, state=OPENING 2024-11-20T23:37:22,995 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T23:37:23,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:23,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:37:23,008 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:37:23,008 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:37:23,008 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:37:23,008 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,40151,1732145842087}] 2024-11-20T23:37:23,163 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T23:37:23,168 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41197, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T23:37:23,175 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T23:37:23,175 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:37:23,178 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C40151%2C1732145842087.meta, suffix=.meta, logDir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087, archiveDir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/oldWALs, maxLogs=32 2024-11-20T23:37:23,179 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta 2024-11-20T23:37:23,185 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta 2024-11-20T23:37:23,187 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40131:40131),(127.0.0.1/127.0.0.1:34067:34067)] 2024-11-20T23:37:23,188 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:37:23,188 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T23:37:23,188 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T23:37:23,188 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
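
The coprocessor entries above show hbase:meta loading org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint because it is declared in the meta table descriptor (the coprocessor$1 attribute with priority 536870911 printed earlier). For a user table, declaring such an endpoint in the descriptor looks roughly like the sketch below; the table and family names are invented for the example, and the snippet only shows the shape of the client API, not how meta itself is created.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableWithEndpoint {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("example_table"))                    // hypothetical table
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")))
              // Same endpoint class that meta declares in its descriptor above.
              .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
              .build();
          admin.createTable(td);
        }
      }
    }
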
2024-11-20T23:37:23,188 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T23:37:23,189 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:37:23,189 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T23:37:23,189 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T23:37:23,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:37:23,192 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:37:23,192 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:23,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:23,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:37:23,195 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:37:23,195 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:23,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:23,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:37:23,196 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:37:23,196 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:23,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:37:23,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:37:23,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:37:23,198 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:23,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
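
The HStore(327) lines above show each meta column family being opened with the attributes from its descriptor: ROW_INDEX_V1 data block encoding, ROWCOL bloom filter, in-memory caching and an 8 KB block size for the info family. The sketch below expresses an equivalent family definition through the public builder API for a hypothetical family; the attribute values are copied from the log, the family name is made up.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamily {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log:
        // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL, IN_MEMORY=true,
        // VERSIONS=3, BLOCKSIZE=8192.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("f")) // hypothetical family name
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setMaxVersions(3)
            .setBlocksize(8192)
            .build();
        System.out.println(cf);
      }
    }
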
2024-11-20T23:37:23,199 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:37:23,199 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740 2024-11-20T23:37:23,201 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740 2024-11-20T23:37:23,202 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:37:23,202 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:37:23,203 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T23:37:23,204 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:37:23,205 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812552, jitterRate=0.03321453928947449}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:37:23,205 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T23:37:23,206 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732145843189Writing region info on filesystem at 1732145843189Initializing all the Stores at 1732145843190 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145843190Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145843191 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145843191Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145843191Cleaning up temporary data from old regions at 1732145843202 (+11 ms)Running coprocessor post-open hooks at 1732145843205 (+3 ms)Region opened successfully at 1732145843206 (+1 ms) 2024-11-20T23:37:23,208 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732145843162 2024-11-20T23:37:23,210 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T23:37:23,211 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T23:37:23,211 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=412a5e44fd2e,40151,1732145842087 2024-11-20T23:37:23,213 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,40151,1732145842087, state=OPEN 2024-11-20T23:37:23,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:37:23,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:37:23,282 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=412a5e44fd2e,40151,1732145842087 2024-11-20T23:37:23,282 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:37:23,282 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:37:23,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T23:37:23,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,40151,1732145842087 in 274 msec 2024-11-20T23:37:23,291 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T23:37:23,291 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 769 msec 2024-11-20T23:37:23,293 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:37:23,293 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T23:37:23,295 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:37:23,295 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,40151,1732145842087, seqNum=-1] 2024-11-20T23:37:23,296 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:37:23,297 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59459, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:37:23,306 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 834 msec 2024-11-20T23:37:23,306 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732145843306, completionTime=-1 2024-11-20T23:37:23,306 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T23:37:23,306 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T23:37:23,308 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T23:37:23,308 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732145903308 2024-11-20T23:37:23,309 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732145963309 2024-11-20T23:37:23,309 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-20T23:37:23,309 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,37185,1732145841890-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,309 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,37185,1732145841890-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,309 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,37185,1732145841890-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,309 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-412a5e44fd2e:37185, period=300000, unit=MILLISECONDS is enabled. 
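[Editor's note] InitMetaProcedure(114) above creates the two built-in namespaces, 'default' and 'hbase'. A minimal, hypothetical client-side check (class name and connection setup are placeholders, not the test's code) would be to list them through Admin once the master reports initialization complete:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespaces {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // After InitMetaProcedure completes, both built-in namespaces should be visible.
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println(ns.getName());   // expect: default, hbase
                }
            }
        }
    }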
2024-11-20T23:37:23,309 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,310 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,312 DEBUG [master/412a5e44fd2e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T23:37:23,313 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.143sec 2024-11-20T23:37:23,314 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T23:37:23,314 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T23:37:23,314 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T23:37:23,314 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T23:37:23,314 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T23:37:23,314 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,37185,1732145841890-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:37:23,314 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,37185,1732145841890-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T23:37:23,317 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T23:37:23,317 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T23:37:23,317 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,37185,1732145841890-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,376 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T23:37:23,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:37:23,393 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:37:23,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:37:23,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:37:23,414 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bb75251, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:37:23,414 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 412a5e44fd2e,37185,-1 for getting cluster id 2024-11-20T23:37:23,414 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T23:37:23,416 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '31458b4c-5282-425c-978f-61c7ee6a57b4' 2024-11-20T23:37:23,417 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T23:37:23,417 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "31458b4c-5282-425c-978f-61c7ee6a57b4" 2024-11-20T23:37:23,417 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ff0542e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:37:23,417 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [412a5e44fd2e,37185,-1] 2024-11-20T23:37:23,418 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T23:37:23,418 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:37:23,419 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51506, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T23:37:23,420 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@451ba750, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:37:23,421 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:37:23,422 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,40151,1732145842087, seqNum=-1] 2024-11-20T23:37:23,422 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:37:23,423 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36552, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:37:23,425 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=412a5e44fd2e,37185,1732145841890 2024-11-20T23:37:23,425 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:23,429 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T23:37:23,474 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:37:23,474 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:23,474 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:23,474 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:37:23,474 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:37:23,474 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:37:23,474 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T23:37:23,474 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:37:23,475 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32927 2024-11-20T23:37:23,477 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32927 connecting to ZooKeeper ensemble=127.0.0.1:51729 2024-11-20T23:37:23,478 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:23,480 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:37:23,502 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329270x0, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:37:23,503 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-20T23:37:23,503 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32927-0x1015a9c027a0002 connected 2024-11-20T23:37:23,503 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:32927-0x1015a9c027a0002, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-20T23:37:23,504 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T23:37:23,505 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with 
cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T23:37:23,505 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:32927-0x1015a9c027a0002, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T23:37:23,507 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32927-0x1015a9c027a0002, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:37:23,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32927 2024-11-20T23:37:23,509 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32927 2024-11-20T23:37:23,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32927 2024-11-20T23:37:23,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32927 2024-11-20T23:37:23,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32927 2024-11-20T23:37:23,518 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.HRegionServer(746): ClusterId : 31458b4c-5282-425c-978f-61c7ee6a57b4 2024-11-20T23:37:23,518 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T23:37:23,528 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T23:37:23,528 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T23:37:23,538 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T23:37:23,539 DEBUG [RS:1;412a5e44fd2e:32927 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bba5bd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:37:23,551 DEBUG [RS:1;412a5e44fd2e:32927 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;412a5e44fd2e:32927 2024-11-20T23:37:23,551 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T23:37:23,551 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T23:37:23,551 DEBUG [RS:1;412a5e44fd2e:32927 {}] regionserver.HRegionServer(832): About to register with Master. 
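[Editor's note] The "set balanceSwitch=false" request logged by MasterRpcServices(567) a few entries up is the usual way a test pins region locations before it starts killing datanodes. The sketch below shows the equivalent client call; the class name is a placeholder, and the quorum/port are simply copied from this run's ZooKeeper ensemble (127.0.0.1:51729), so they are illustration only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableBalancer {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");             // ensemble host from this run
            conf.setInt("hbase.zookeeper.property.clientPort", 51729);   // ephemeral port from this run
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Equivalent of the "set balanceSwitch=false" request seen in the log.
                boolean previouslyOn = admin.balancerSwitch(false, true);
                System.out.println("balancer previously " + (previouslyOn ? "enabled" : "disabled"));
            }
        }
    }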
2024-11-20T23:37:23,552 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.HRegionServer(2659): reportForDuty to master=412a5e44fd2e,37185,1732145841890 with port=32927, startcode=1732145843473 2024-11-20T23:37:23,552 DEBUG [RS:1;412a5e44fd2e:32927 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T23:37:23,553 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43487, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T23:37:23,554 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37185 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 412a5e44fd2e,32927,1732145843473 2024-11-20T23:37:23,554 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37185 {}] master.ServerManager(517): Registering regionserver=412a5e44fd2e,32927,1732145843473 2024-11-20T23:37:23,556 DEBUG [RS:1;412a5e44fd2e:32927 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1 2024-11-20T23:37:23,556 DEBUG [RS:1;412a5e44fd2e:32927 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44951 2024-11-20T23:37:23,556 DEBUG [RS:1;412a5e44fd2e:32927 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T23:37:23,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:37:23,570 DEBUG [RS:1;412a5e44fd2e:32927 {}] zookeeper.ZKUtil(111): regionserver:32927-0x1015a9c027a0002, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/412a5e44fd2e,32927,1732145843473 2024-11-20T23:37:23,570 WARN [RS:1;412a5e44fd2e:32927 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T23:37:23,570 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [412a5e44fd2e,32927,1732145843473] 2024-11-20T23:37:23,570 INFO [RS:1;412a5e44fd2e:32927 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:37:23,570 DEBUG [RS:1;412a5e44fd2e:32927 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473 2024-11-20T23:37:23,574 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T23:37:23,576 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T23:37:23,577 INFO [RS:1;412a5e44fd2e:32927 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T23:37:23,577 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
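[Editor's note] The 880 MB BlockCache (BlockCacheFactory(123), a few entries up) and the 880 M / 836 M memstore limits (MemStoreFlusher(131) above) are heap fractions rather than absolute settings. The sketch below names the keys involved, assuming the stock defaults of 0.4 of the heap for each pool and a low-water mark at 95% of the memstore limit (836/880 ≈ 0.95); the class name is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HeapFractions {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Fractions of the region server heap that feed the sizes logged above.
            float blockCacheFraction = conf.getFloat("hfile.block.cache.size", 0.4f);
            float memstoreFraction   = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);
            float memstoreLowerMark  = conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            System.out.println(blockCacheFraction + " " + memstoreFraction + " " + memstoreLowerMark);
        }
    }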
2024-11-20T23:37:23,577 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T23:37:23,578 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T23:37:23,578 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,578 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:23,578 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:23,578 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:23,578 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:23,578 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:23,578 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:37:23,578 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:23,579 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:23,579 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:23,579 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:23,579 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:23,579 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:37:23,579 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:37:23,579 DEBUG [RS:1;412a5e44fd2e:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:37:23,580 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-20T23:37:23,580 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,580 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,580 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,580 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,581 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,32927,1732145843473-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:37:23,595 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T23:37:23,595 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,32927,1732145843473-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,595 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,595 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.Replication(171): 412a5e44fd2e,32927,1732145843473 started 2024-11-20T23:37:23,608 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:37:23,608 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.HRegionServer(1482): Serving as 412a5e44fd2e,32927,1732145843473, RpcServer on 412a5e44fd2e/172.17.0.2:32927, sessionid=0x1015a9c027a0002 2024-11-20T23:37:23,608 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T23:37:23,608 DEBUG [RS:1;412a5e44fd2e:32927 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 412a5e44fd2e,32927,1732145843473 2024-11-20T23:37:23,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;412a5e44fd2e:32927,5,FailOnTimeoutGroup] 2024-11-20T23:37:23,608 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,32927,1732145843473' 2024-11-20T23:37:23,608 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T23:37:23,609 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-20T23:37:23,609 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T23:37:23,609 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T23:37:23,610 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T23:37:23,610 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T23:37:23,610 DEBUG [RS:1;412a5e44fd2e:32927 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
412a5e44fd2e,32927,1732145843473 2024-11-20T23:37:23,610 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,32927,1732145843473' 2024-11-20T23:37:23,610 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T23:37:23,610 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T23:37:23,610 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 412a5e44fd2e,37185,1732145841890 2024-11-20T23:37:23,611 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6c57f553 2024-11-20T23:37:23,611 DEBUG [RS:1;412a5e44fd2e:32927 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T23:37:23,611 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T23:37:23,611 INFO [RS:1;412a5e44fd2e:32927 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T23:37:23,611 INFO [RS:1;412a5e44fd2e:32927 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T23:37:23,613 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51510, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T23:37:23,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T23:37:23,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-20T23:37:23,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T23:37:23,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-20T23:37:23,616 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T23:37:23,616 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:23,616 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-20T23:37:23,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T23:37:23,617 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T23:37:23,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741835_1011 (size=393) 2024-11-20T23:37:23,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38187 is added to blk_1073741835_1011 (size=393) 2024-11-20T23:37:23,628 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0c5970216248783cd04249080d657bfa, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1 2024-11-20T23:37:23,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38187 is added to blk_1073741836_1012 (size=76) 2024-11-20T23:37:23,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741836_1012 (size=76) 2024-11-20T23:37:23,638 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:37:23,638 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 0c5970216248783cd04249080d657bfa, disabling compactions & flushes 2024-11-20T23:37:23,638 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 2024-11-20T23:37:23,638 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 2024-11-20T23:37:23,638 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. after waiting 0 ms 2024-11-20T23:37:23,638 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 2024-11-20T23:37:23,638 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 2024-11-20T23:37:23,638 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0c5970216248783cd04249080d657bfa: Waiting for close lock at 1732145843638Disabling compacts and flushes for region at 1732145843638Disabling writes for close at 1732145843638Writing region close event to WAL at 1732145843638Closed at 1732145843638 2024-11-20T23:37:23,640 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T23:37:23,640 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732145843640"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732145843640"}]},"ts":"1732145843640"} 2024-11-20T23:37:23,643 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
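[Editor's note] HMaster$4(2454) above shows the create-table request, and TableDescriptorChecker(321) earlier warned that the descriptor carries a deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192), presumably so the test can force flushes and log rolls quickly. The test's own code is not part of this log; the following is only a sketch of equivalent admin calls that would produce the same descriptor (one 'info' family, one version, ROW bloom filter), with the connection setup left at defaults.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptor desc = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
                    // Tiny values, as flagged by TableDescriptorChecker above.
                    .setMaxFileSize(786432)
                    .setMemStoreFlushSize(8192)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("info"))
                        .setMaxVersions(1)                     // VERSIONS => '1'
                        .setBloomFilterType(BloomType.ROW)     // BLOOMFILTER => 'ROW'
                        .build())
                    .build();
                admin.createTable(desc);
            }
        }
    }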
2024-11-20T23:37:23,644 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T23:37:23,645 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732145843644"}]},"ts":"1732145843644"} 2024-11-20T23:37:23,647 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-20T23:37:23,647 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=0c5970216248783cd04249080d657bfa, ASSIGN}] 2024-11-20T23:37:23,649 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=0c5970216248783cd04249080d657bfa, ASSIGN 2024-11-20T23:37:23,650 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=0c5970216248783cd04249080d657bfa, ASSIGN; state=OFFLINE, location=412a5e44fd2e,40151,1732145842087; forceNewPlan=false, retain=false 2024-11-20T23:37:23,713 INFO [RS:1;412a5e44fd2e:32927 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C32927%2C1732145843473, suffix=, logDir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473, archiveDir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/oldWALs, maxLogs=32 2024-11-20T23:37:23,714 INFO [RS:1;412a5e44fd2e:32927 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C32927%2C1732145843473.1732145843714 2024-11-20T23:37:23,721 INFO [RS:1;412a5e44fd2e:32927 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 2024-11-20T23:37:23,722 DEBUG [RS:1;412a5e44fd2e:32927 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34067:34067),(127.0.0.1/127.0.0.1:40131:40131)] 2024-11-20T23:37:23,801 INFO [412a5e44fd2e:37185 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
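[Editor's note] AbstractFSWAL(613) above reports blocksize=256 MB, rollsize=128 MB and maxLogs=32 for the new region server's WAL. Those values appear to follow the usual derivation: the WAL block size (overridable via hbase.regionserver.hlog.blocksize, here matching twice the 128 MB mini-DFS block size) times the roll multiplier (0.5 by default) gives the roll size, and hbase.regionserver.maxlogs caps the number of WAL files. A sketch of that derivation, with the class name illustrative and the fallback values taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSettings {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            long blockSize  = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
            float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            long rollSize   = (long) (blockSize * multiplier);                                       // rollsize=128 MB
            int maxLogs     = conf.getInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32
            System.out.println(blockSize + " " + rollSize + " " + maxLogs);
        }
    }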
2024-11-20T23:37:23,802 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0c5970216248783cd04249080d657bfa, regionState=OPENING, regionLocation=412a5e44fd2e,40151,1732145842087 2024-11-20T23:37:23,805 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=0c5970216248783cd04249080d657bfa, ASSIGN because future has completed 2024-11-20T23:37:23,805 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0c5970216248783cd04249080d657bfa, server=412a5e44fd2e,40151,1732145842087}] 2024-11-20T23:37:23,963 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 2024-11-20T23:37:23,963 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0c5970216248783cd04249080d657bfa, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:37:23,964 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:23,964 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:37:23,964 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:23,965 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:23,966 INFO [StoreOpener-0c5970216248783cd04249080d657bfa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:23,968 INFO [StoreOpener-0c5970216248783cd04249080d657bfa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0c5970216248783cd04249080d657bfa columnFamilyName info 2024-11-20T23:37:23,968 DEBUG [StoreOpener-0c5970216248783cd04249080d657bfa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:37:23,969 INFO [StoreOpener-0c5970216248783cd04249080d657bfa-1 {}] regionserver.HStore(327): Store=0c5970216248783cd04249080d657bfa/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:37:23,969 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:23,970 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa 2024-11-20T23:37:23,970 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa 2024-11-20T23:37:23,970 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:23,970 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:23,972 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:23,974 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:37:23,975 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0c5970216248783cd04249080d657bfa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824499, jitterRate=0.04840470850467682}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T23:37:23,975 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:23,976 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0c5970216248783cd04249080d657bfa: Running coprocessor pre-open hook at 1732145843965Writing region info on filesystem at 1732145843965Initializing all the Stores at 1732145843966 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145843966Cleaning up temporary data from old regions at 1732145843970 (+4 ms)Running coprocessor post-open hooks at 1732145843975 (+5 ms)Region opened successfully at 1732145843975 2024-11-20T23:37:23,977 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa., pid=6, masterSystemTime=1732145843958 2024-11-20T23:37:23,980 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 2024-11-20T23:37:23,980 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 2024-11-20T23:37:23,981 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0c5970216248783cd04249080d657bfa, regionState=OPEN, openSeqNum=2, regionLocation=412a5e44fd2e,40151,1732145842087 2024-11-20T23:37:23,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0c5970216248783cd04249080d657bfa, server=412a5e44fd2e,40151,1732145842087 because future has completed 2024-11-20T23:37:23,988 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T23:37:23,988 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0c5970216248783cd04249080d657bfa, server=412a5e44fd2e,40151,1732145842087 in 180 msec 2024-11-20T23:37:23,992 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T23:37:23,992 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=0c5970216248783cd04249080d657bfa, ASSIGN in 341 msec 2024-11-20T23:37:23,993 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T23:37:23,994 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732145843993"}]},"ts":"1732145843993"} 2024-11-20T23:37:23,996 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-20T23:37:23,998 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T23:37:24,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 384 msec 2024-11-20T23:37:26,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T23:37:26,999 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T23:37:27,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-20T23:37:27,000 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-20T23:37:27,001 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:37:27,001 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T23:37:28,691 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T23:37:28,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:37:28,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:37:28,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:37:28,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:37:28,735 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-20T23:37:33,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T23:37:33,725 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-20T23:37:33,725 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-20T23:37:33,729 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-20T23:37:33,730 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 2024-11-20T23:37:33,742 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:37:33,746 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:37:33,747 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:37:33,747 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:37:33,747 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:37:33,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ad9bbfc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:37:33,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30008f24{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:37:33,873 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f55aa3b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/java.io.tmpdir/jetty-localhost-36493-hadoop-hdfs-3_4_1-tests_jar-_-any-4687462233277141856/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:33,874 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6bd8b5f{HTTP/1.1, (http/1.1)}{localhost:36493} 2024-11-20T23:37:33,874 INFO [Time-limited test {}] server.Server(415): Started @124074ms 2024-11-20T23:37:33,876 WARN [Time-limited test {}] 
web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:37:33,980 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:37:33,988 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:37:34,013 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:37:34,013 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:37:34,013 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:37:34,021 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23038dc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:37:34,021 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f85c2b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:37:34,144 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@57d6f5a1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/java.io.tmpdir/jetty-localhost-37967-hadoop-hdfs-3_4_1-tests_jar-_-any-7253246707091135468/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:34,145 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a2bfd90{HTTP/1.1, (http/1.1)}{localhost:37967} 2024-11-20T23:37:34,145 INFO [Time-limited test {}] server.Server(415): Started @124345ms 2024-11-20T23:37:34,147 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:37:34,212 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:37:34,216 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:37:34,217 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:37:34,217 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:37:34,217 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:37:34,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7524e7e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:37:34,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b21f544{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:37:34,335 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@463983fb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/java.io.tmpdir/jetty-localhost-39177-hadoop-hdfs-3_4_1-tests_jar-_-any-9371957609725326948/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:34,335 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@57fef5ae{HTTP/1.1, (http/1.1)}{localhost:39177} 2024-11-20T23:37:34,335 INFO [Time-limited test {}] server.Server(415): Started @124535ms 2024-11-20T23:37:34,337 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:37:35,159 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data5/current/BP-373955406-172.17.0.2-1732145839356/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:35,159 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data6/current/BP-373955406-172.17.0.2-1732145839356/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:35,177 WARN [Thread-810 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T23:37:35,180 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xba7a959ed3a2645f with lease ID 0xab260f04e4f9a56: Processing first storage report for DS-58ca4514-8f9b-4662-8dcc-ed1149af079f from datanode DatanodeRegistration(127.0.0.1:41443, datanodeUuid=a378e659-0cce-4512-aaf8-51321e73df8c, infoPort=45261, infoSecurePort=0, ipcPort=44485, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356) 2024-11-20T23:37:35,180 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xba7a959ed3a2645f with lease ID 0xab260f04e4f9a56: from storage DS-58ca4514-8f9b-4662-8dcc-ed1149af079f node DatanodeRegistration(127.0.0.1:41443, datanodeUuid=a378e659-0cce-4512-aaf8-51321e73df8c, infoPort=45261, infoSecurePort=0, ipcPort=44485, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:37:35,180 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xba7a959ed3a2645f with lease ID 0xab260f04e4f9a56: Processing first storage report for DS-460e65d4-bef9-4fba-8b87-586f2201653c from datanode DatanodeRegistration(127.0.0.1:41443, datanodeUuid=a378e659-0cce-4512-aaf8-51321e73df8c, infoPort=45261, infoSecurePort=0, ipcPort=44485, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356) 2024-11-20T23:37:35,180 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xba7a959ed3a2645f with lease ID 0xab260f04e4f9a56: from storage DS-460e65d4-bef9-4fba-8b87-586f2201653c node DatanodeRegistration(127.0.0.1:41443, datanodeUuid=a378e659-0cce-4512-aaf8-51321e73df8c, infoPort=45261, infoSecurePort=0, ipcPort=44485, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:37:35,833 WARN [Thread-881 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data8/current/BP-373955406-172.17.0.2-1732145839356/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:35,833 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data7/current/BP-373955406-172.17.0.2-1732145839356/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:35,854 WARN [Thread-832 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T23:37:35,857 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x859ad573cb5e2461 with lease ID 0xab260f04e4f9a57: Processing first storage report for DS-ff4ec727-64f0-4c21-934e-5478f6335502 from datanode DatanodeRegistration(127.0.0.1:40329, datanodeUuid=e2e680ee-388f-4c53-9d48-7678b27edabd, infoPort=37611, infoSecurePort=0, ipcPort=35579, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356) 2024-11-20T23:37:35,857 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x859ad573cb5e2461 with lease ID 0xab260f04e4f9a57: from storage DS-ff4ec727-64f0-4c21-934e-5478f6335502 node DatanodeRegistration(127.0.0.1:40329, datanodeUuid=e2e680ee-388f-4c53-9d48-7678b27edabd, infoPort=37611, infoSecurePort=0, ipcPort=35579, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T23:37:35,857 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x859ad573cb5e2461 with lease ID 0xab260f04e4f9a57: Processing first storage report for DS-69d77a60-4b29-4ff3-a0b4-b0d33a48f487 from datanode DatanodeRegistration(127.0.0.1:40329, datanodeUuid=e2e680ee-388f-4c53-9d48-7678b27edabd, infoPort=37611, infoSecurePort=0, ipcPort=35579, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356) 2024-11-20T23:37:35,857 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x859ad573cb5e2461 with lease ID 0xab260f04e4f9a57: from storage DS-69d77a60-4b29-4ff3-a0b4-b0d33a48f487 node DatanodeRegistration(127.0.0.1:40329, datanodeUuid=e2e680ee-388f-4c53-9d48-7678b27edabd, infoPort=37611, infoSecurePort=0, ipcPort=35579, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:37:35,930 WARN [Thread-892 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10/current/BP-373955406-172.17.0.2-1732145839356/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:35,930 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9/current/BP-373955406-172.17.0.2-1732145839356/current, will proceed with Du for space computation calculation, 2024-11-20T23:37:35,950 WARN [Thread-854 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T23:37:35,953 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69b670c4b5aa0276 with lease ID 0xab260f04e4f9a58: Processing first storage report for DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846 from datanode DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356) 2024-11-20T23:37:35,953 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69b670c4b5aa0276 with lease ID 0xab260f04e4f9a58: from storage DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846 node DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:37:35,953 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69b670c4b5aa0276 with lease ID 0xab260f04e4f9a58: Processing first storage report for DS-fff82be5-b559-4e6e-ac19-8dc8a29567f0 from datanode DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356) 2024-11-20T23:37:35,953 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69b670c4b5aa0276 with lease ID 0xab260f04e4f9a58: from storage DS-fff82be5-b559-4e6e-ac19-8dc8a29567f0 node DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:37:35,991 WARN [ResponseProcessor for block BP-373955406-172.17.0.2-1732145839356:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-373955406-172.17.0.2-1732145839356:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:35,991 WARN [ResponseProcessor for block BP-373955406-172.17.0.2-1732145839356:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-373955406-172.17.0.2-1732145839356:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
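Editor's note: the repeated DirectoryScanner warning above ("dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1") means the scan throttle was configured above its accepted ceiling, so each datanode falls back to -1 and disables throttling. Below is a minimal sketch of setting the property within range before the datanodes start, assuming a Hadoop Configuration is being prepared for the test cluster; the value 500 is illustrative and not taken from this run.

    import org.apache.hadoop.conf.Configuration;

    public class DirectoryScannerThrottleSketch {
      // Property name exactly as reported in the warning above.
      static final String THROTTLE_KEY = "dfs.datanode.directoryscan.throttle.limit.ms.per.sec";

      public static Configuration withScanThrottle(Configuration base) {
        Configuration conf = new Configuration(base);
        // Values above 1000 ms/sec trigger the warning above and are replaced by
        // the default of -1 (throttling disabled); 500 is an illustrative in-range
        // setting, not a value used by this test.
        conf.setInt(THROTTLE_KEY, 500);
        return conf;
      }
    }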
2024-11-20T23:37:35,991 WARN [ResponseProcessor for block BP-373955406-172.17.0.2-1732145839356:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-373955406-172.17.0.2-1732145839356:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:35,992 WARN [DataStreamer for file /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/WALs/412a5e44fd2e,37185,1732145841890/412a5e44fd2e%2C37185%2C1732145841890.1732145842246 block BP-373955406-172.17.0.2-1732145839356:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 2024-11-20T23:37:35,992 WARN [DataStreamer for file /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 block BP-373955406-172.17.0.2-1732145839356:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 2024-11-20T23:37:35,992 WARN [DataStreamer for file /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 block BP-373955406-172.17.0.2-1732145839356:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 2024-11-20T23:37:35,992 WARN [ResponseProcessor for block BP-373955406-172.17.0.2-1732145839356:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-373955406-172.17.0.2-1732145839356:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-373955406-172.17.0.2-1732145839356:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
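Editor's note: the "Error Recovery for ... in pipeline [...]: datanode 0 (...) is bad" entries above are the HDFS client's write-pipeline recovery: when the ack stream breaks, the streamer marks the unresponsive node bad and continues with the survivors. The sketch below illustrates only that selection step; it is a conceptual stand-in, not the real org.apache.hadoop.hdfs.DataStreamer code, and the class and method names are invented. The two addresses echo the pipeline printed in the entries above.

    import java.util.ArrayList;
    import java.util.List;

    // Conceptual sketch of the "datanode N (...) is bad" decision, not Hadoop code.
    public class PipelineRecoverySketch {

      /** Drop the datanode reported at errorIndex and return the surviving pipeline. */
      public static List<String> survivors(List<String> pipeline, int errorIndex) {
        List<String> rebuilt = new ArrayList<>(pipeline);
        String bad = rebuilt.remove(errorIndex);
        System.out.println("datanode " + errorIndex + " (" + bad + ") is bad.");
        return rebuilt; // the client then re-establishes the block stream to these nodes
      }

      public static void main(String[] args) {
        List<String> pipeline = List.of("127.0.0.1:45219", "127.0.0.1:38187");
        System.out.println("recovered pipeline: " + survivors(pipeline, 0));
      }
    }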
2024-11-20T23:37:35,993 WARN [DataStreamer for file /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta block BP-373955406-172.17.0.2-1732145839356:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 2024-11-20T23:37:35,993 WARN [PacketResponder: BP-373955406-172.17.0.2-1732145839356:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45219] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:35,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-488779230_22 at /127.0.0.1:54554 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45219:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54554 dst: /127.0.0.1:45219 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:35,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1893991804_22 at /127.0.0.1:54620 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:45219:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54620 dst: /127.0.0.1:45219 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:35,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:52750 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38187:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52750 dst: /127.0.0.1:38187 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:35,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1893991804_22 at /127.0.0.1:52770 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:38187:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52770 dst: /127.0.0.1:38187 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:35,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-488779230_22 at /127.0.0.1:52718 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38187:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52718 dst: /127.0.0.1:38187 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:35,994 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:54594 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45219:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54594 dst: /127.0.0.1:45219 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:35,994 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:52744 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38187:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52744 dst: /127.0.0.1:38187 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:35,994 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:54584 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45219:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54584 dst: /127.0.0.1:45219 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:35,996 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3efce601{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:35,997 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e79a3d4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:37:35,997 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:37:35,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c64d82b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:37:35,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@217a95d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,STOPPED} 2024-11-20T23:37:36,000 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
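Editor's note: the stopped Jetty contexts and the interrupted Command processor / IncrementalBlockReportManager threads here and in the entries that follow are the test shutting one datanode down to simulate its death; the surrounding DataXceiver and ResponseProcessor errors are its writers noticing the loss. A minimal sketch of driving such a shutdown from a mini-cluster test is shown below, assuming MiniDFSCluster from the hadoop-hdfs test jar is on the classpath; the index 0 and the way the node is chosen are illustrative, not read from TestLogRolling itself.

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DatanodeDeathSketch {
      /**
       * Stop one datanode of a running mini cluster to simulate its death.
       * The returned properties can be used later to restart the same node.
       */
      public static MiniDFSCluster.DataNodeProperties killOneDatanode(MiniDFSCluster cluster) {
        // Writers with blocks on this node will see pipeline errors like the
        // DataXceiver / ResponseProcessor entries logged above.
        return cluster.stopDataNode(0);
      }
    }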
2024-11-20T23:37:36,000 WARN [BP-373955406-172.17.0.2-1732145839356 heartbeating to localhost/127.0.0.1:44951 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:37:36,000 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:37:36,000 WARN [BP-373955406-172.17.0.2-1732145839356 heartbeating to localhost/127.0.0.1:44951 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-373955406-172.17.0.2-1732145839356 (Datanode Uuid 1092f554-8ebb-4f08-bb75-2db829448461) service to localhost/127.0.0.1:44951 2024-11-20T23:37:36,001 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data3/current/BP-373955406-172.17.0.2-1732145839356 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:36,001 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data4/current/BP-373955406-172.17.0.2-1732145839356 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:36,001 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:37:36,002 WARN [DataStreamer for file /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 block BP-373955406-172.17.0.2-1732145839356:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:36,002 WARN [DataStreamer for file /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta block BP-373955406-172.17.0.2-1732145839356:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:36,002 WARN [DataStreamer for file /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 block BP-373955406-172.17.0.2-1732145839356:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:36,005 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@7b24c947 {}] datanode.DataXceiver(331): 127.0.0.1:38187:DataXceiver error processing unknown operation src: /127.0.0.1:53816 dst: /127.0.0.1:38187 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:36,005 WARN [DataStreamer for file /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/WALs/412a5e44fd2e,37185,1732145841890/412a5e44fd2e%2C37185%2C1732145841890.1732145842246 block BP-373955406-172.17.0.2-1732145839356:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:37:36,008 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bba803f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:36,008 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7629a449{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:37:36,008 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:37:36,009 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a18c5e6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:37:36,009 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@372d60ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,STOPPED} 2024-11-20T23:37:36,010 WARN [BP-373955406-172.17.0.2-1732145839356 heartbeating to localhost/127.0.0.1:44951 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:37:36,010 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T23:37:36,010 WARN [BP-373955406-172.17.0.2-1732145839356 heartbeating to localhost/127.0.0.1:44951 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-373955406-172.17.0.2-1732145839356 (Datanode Uuid c0de83bb-f060-489d-ae10-40822b56955e) service to localhost/127.0.0.1:44951 2024-11-20T23:37:36,010 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:37:36,011 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data1/current/BP-373955406-172.17.0.2-1732145839356 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:36,011 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data2/current/BP-373955406-172.17.0.2-1732145839356 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:36,011 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:37:36,016 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa., hostname=412a5e44fd2e,40151,1732145842087, seqNum=2] 2024-11-20T23:37:36,018 ERROR [FSHLog-0-hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1-prefix:412a5e44fd2e,40151,1732145842087 {}] wal.AbstractFSWAL(1838): appendAndSync throws 
IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:36,018 WARN [FSHLog-0-hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1-prefix:412a5e44fd2e,40151,1732145842087 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:36,018 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:36,018 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C40151%2C1732145842087:(num 1732145842724) roll requested 2024-11-20T23:37:36,019 INFO [regionserver/412a5e44fd2e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C40151%2C1732145842087.1732145856019 2024-11-20T23:37:36,022 WARN [Thread-903 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:36,022 WARN [Thread-903 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 2024-11-20T23:37:36,022 WARN [Thread-903 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741838_1018 2024-11-20T23:37:36,025 WARN [Thread-903 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:37:36,036 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:36,036 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:36,036 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:36,036 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:36,036 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:36,036 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145856019 2024-11-20T23:37:36,044 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
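Editor's note: the sequence above is the behaviour this test exercises: append/sync fails with "All datanodes [...] are bad. Aborting...", the log roller requests a roll, a new WAL file is opened on a healthy pipeline, and closing the old writer is allowed to fail non-fatally ("Failed to write trailer, non-fatal, continuing..."). The sketch below shows that control flow schematically only; it is not the FSHLog/AbstractFSWAL implementation, and the interface names are invented for illustration.

    import java.io.IOException;

    // Schematic roll-on-sync-failure flow; names are illustrative, not HBase classes.
    public class RollOnSyncFailureSketch {

      interface WalWriter extends AutoCloseable {
        void sync() throws IOException;
        @Override void close() throws IOException;
      }

      interface WalWriterFactory {
        WalWriter newWriter() throws IOException; // opens a file on a fresh pipeline
      }

      static WalWriter syncOrRoll(WalWriter current, WalWriterFactory factory) throws IOException {
        try {
          current.sync();
          return current;                        // happy path: keep the current writer
        } catch (IOException syncFailure) {
          WalWriter fresh = factory.newWriter(); // roll first so new edits have somewhere to go
          try {
            current.close();                     // best effort, mirrors the "close old writer failed" warning that follows
          } catch (IOException closeFailure) {
            // non-fatal: the abandoned file is reclaimed later through lease recovery
          }
          return fresh;
        }
      }
    }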
2024-11-20T23:37:36,045 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45261:45261),(127.0.0.1/127.0.0.1:37611:37611)] 2024-11-20T23:37:36,045 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 is not closed yet, will try archiving it next time 2024-11-20T23:37:36,045 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:36,046 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-20T23:37:36,046 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-20T23:37:36,046 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 2024-11-20T23:37:36,050 WARN [IPC Server handler 4 on default port 44951 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-20T23:37:36,054 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 after 5ms 2024-11-20T23:37:36,224 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:37:37,580 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:38,045 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:38,046 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145856019 2024-11-20T23:37:38,048 WARN [ResponseProcessor for block BP-373955406-172.17.0.2-1732145839356:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-373955406-172.17.0.2-1732145839356:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:38,049 WARN [DataStreamer for file /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145856019 block BP-373955406-172.17.0.2-1732145839356:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 
2024-11-20T23:37:38,049 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:43324 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:41443:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43324 dst: /127.0.0.1:41443 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:38,049 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:47854 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:40329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47854 dst: /127.0.0.1:40329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:38,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f55aa3b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:38,108 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6bd8b5f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:37:38,108 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:37:38,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30008f24{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:37:38,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ad9bbfc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,STOPPED} 2024-11-20T23:37:38,109 WARN [BP-373955406-172.17.0.2-1732145839356 heartbeating to localhost/127.0.0.1:44951 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:37:38,109 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T23:37:38,109 WARN [BP-373955406-172.17.0.2-1732145839356 heartbeating to localhost/127.0.0.1:44951 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-373955406-172.17.0.2-1732145839356 (Datanode Uuid a378e659-0cce-4512-aaf8-51321e73df8c) service to localhost/127.0.0.1:44951 2024-11-20T23:37:38,109 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:37:38,110 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data5/current/BP-373955406-172.17.0.2-1732145839356 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:38,110 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data6/current/BP-373955406-172.17.0.2-1732145839356 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:38,110 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:37:38,225 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:39,580 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:40,045 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:40,046 WARN [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]] 2024-11-20T23:37:40,046 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C40151%2C1732145842087:(num 1732145856019) roll requested 2024-11-20T23:37:40,047 INFO [regionserver/412a5e44fd2e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C40151%2C1732145842087.1732145860047 2024-11-20T23:37:40,053 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45219 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:40,053 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:47864 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data8]'}, localName='127.0.0.1:40329', datanodeUuid='e2e680ee-388f-4c53-9d48-7678b27edabd', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741840_1022 to mirror 127.0.0.1:45219 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:40,053 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK], DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 2024-11-20T23:37:40,053 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741840_1022 2024-11-20T23:37:40,053 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:47864 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T23:37:40,053 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:47864 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:40329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47864 dst: /127.0.0.1:40329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:40,054 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK] 2024-11-20T23:37:40,055 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 after 4008ms 2024-11-20T23:37:40,057 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:40,057 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 2024-11-20T23:37:40,057 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741841_1023 2024-11-20T23:37:40,058 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:37:40,061 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:40,060 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:45634 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741842_1024 to mirror 127.0.0.1:41443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:40,061 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 2024-11-20T23:37:40,061 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:45634 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T23:37:40,061 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741842_1024 2024-11-20T23:37:40,061 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:45634 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45634 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T23:37:40,062 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:37:40,069 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:40,070 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:40,070 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:40,070 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:40,070 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:40,070 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145856019 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145860047 2024-11-20T23:37:40,071 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44775:44775),(127.0.0.1/127.0.0.1:37611:37611)] 2024-11-20T23:37:40,072 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 is not closed yet, will try archiving it next time 2024-11-20T23:37:40,072 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145856019 is not closed yet, will try archiving it next time 2024-11-20T23:37:40,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40329 is added to blk_1073741839_1021 (size=2431) 2024-11-20T23:37:40,115 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-20T23:37:40,225 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:40,473 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 is not closed yet, will try archiving it next time 2024-11-20T23:37:41,581 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:41,876 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5eac490c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40329, datanodeUuid=e2e680ee-388f-4c53-9d48-7678b27edabd, infoPort=37611, infoSecurePort=0, ipcPort=35579, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356):Failed to transfer BP-373955406-172.17.0.2-1732145839356:blk_1073741839_1021 to 127.0.0.1:38187 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:42,072 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:42,119 WARN [ResponseProcessor for block BP-373955406-172.17.0.2-1732145839356:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-373955406-172.17.0.2-1732145839356:blk_1073741843_1025 java.io.IOException: Bad response ERROR for BP-373955406-172.17.0.2-1732145839356:blk_1073741843_1025 from datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:37:42,119 WARN [DataStreamer for file /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145860047 block BP-373955406-172.17.0.2-1732145839356:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 2024-11-20T23:37:42,119 WARN [PacketResponder: BP-373955406-172.17.0.2-1732145839356:blk_1073741843_1025, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40329] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:42,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:45648 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45648 dst: /127.0.0.1:38539 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:42,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:47876 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:40329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47876 dst: /127.0.0.1:40329 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T23:37:42,170 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@57d6f5a1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:42,171 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a2bfd90{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:37:42,171 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:37:42,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f85c2b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:37:42,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23038dc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,STOPPED} 2024-11-20T23:37:42,173 WARN [BP-373955406-172.17.0.2-1732145839356 heartbeating to localhost/127.0.0.1:44951 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:37:42,173 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T23:37:42,173 WARN [BP-373955406-172.17.0.2-1732145839356 heartbeating to localhost/127.0.0.1:44951 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-373955406-172.17.0.2-1732145839356 (Datanode Uuid e2e680ee-388f-4c53-9d48-7678b27edabd) service to localhost/127.0.0.1:44951 2024-11-20T23:37:42,173 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:37:42,174 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data7/current/BP-373955406-172.17.0.2-1732145839356 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:42,174 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data8/current/BP-373955406-172.17.0.2-1732145839356 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:37:42,175 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:37:42,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40151 {}] regionserver.HRegion(8855): Flush requested on 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:42,186 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0c5970216248783cd04249080d657bfa 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T23:37:42,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/a0f58ded84cf4678a84b6f37a7188e82 is 1080, key is row0002/info:/1732145858111/Put/seqid=0 2024-11-20T23:37:42,211 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:42,211 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 2024-11-20T23:37:42,211 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741844_1027 2024-11-20T23:37:42,212 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:37:42,214 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:42,214 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 
2024-11-20T23:37:42,214 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741845_1028 2024-11-20T23:37:42,215 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:37:42,217 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:42,217 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 2024-11-20T23:37:42,217 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741846_1029 2024-11-20T23:37:42,217 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK] 2024-11-20T23:37:42,220 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40329 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:37:42,220 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42316 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741847_1030 to mirror 127.0.0.1:40329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:42,220 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 2024-11-20T23:37:42,220 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42316 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:37:42,220 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741847_1030 2024-11-20T23:37:42,220 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42316 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42316 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:42,221 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:37:42,222 WARN [IPC Server handler 3 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T23:37:42,222 WARN [IPC Server handler 3 on default port 44951 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T23:37:42,222 WARN [IPC Server handler 3 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T23:37:42,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741848_1031 (size=10347) 2024-11-20T23:37:42,226 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:37:42,627 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/a0f58ded84cf4678a84b6f37a7188e82 2024-11-20T23:37:42,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/a0f58ded84cf4678a84b6f37a7188e82 as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/a0f58ded84cf4678a84b6f37a7188e82 2024-11-20T23:37:42,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/a0f58ded84cf4678a84b6f37a7188e82, entries=5, sequenceid=11, filesize=10.1 K 2024-11-20T23:37:42,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 0c5970216248783cd04249080d657bfa in 461ms, sequenceid=11, compaction requested=false 2024-11-20T23:37:42,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0c5970216248783cd04249080d657bfa: 2024-11-20T23:37:42,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40151 {}] regionserver.HRegion(8855): Flush requested on 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:42,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0c5970216248783cd04249080d657bfa 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-20T23:37:42,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/eb906a1f180a45ef9ad3383f83e8e303 is 1080, key is row0007/info:/1732145862188/Put/seqid=0 2024-11-20T23:37:42,828 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42342 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741849_1032 to mirror 127.0.0.1:41443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:42,828 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42342 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:37:42,828 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:42,829 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42342 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42342 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T23:37:42,829 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 2024-11-20T23:37:42,829 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741849_1032 2024-11-20T23:37:42,829 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:37:42,831 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:42,831 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 2024-11-20T23:37:42,831 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741850_1033 2024-11-20T23:37:42,832 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:37:42,834 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40329 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
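
The repeating pattern in these records, "Exception in createBlockOutputStream", "Abandoning BP-...:blk_...", "Excluding datanode ...", followed by a fresh attempt on the next block ID (1032, 1033, 1034, ...), is the HDFS client's create-retry loop: each failed pipeline setup gives the block back to the NameNode, adds the unreachable datanode to an exclude list, and requests a new block. The following is only a self-contained simulation of that control flow (addresses copied from the log, class name and everything else invented); it is not the real DataStreamer code.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Set;

    public final class PipelineRetrySketch {
      private static final String LOCAL = "127.0.0.1:38539";               // still alive in the log
      private static final List<String> MIRROR_CANDIDATES = List.of(
          "127.0.0.1:41443", "127.0.0.1:38187", "127.0.0.1:40329", "127.0.0.1:45219");
      private static final Set<String> DEAD = Set.copyOf(MIRROR_CANDIDATES); // all stopped by the test

      public static void main(String[] args) {
        List<String> excluded = new ArrayList<>();
        while (true) {
          String mirror = pickMirror(excluded);   // stand-in for the NameNode choosing a target
          if (mirror == null) {
            // Every candidate is excluded: this is the NameNode's
            // "Failed to place enough replicas, still in need of 1 to reach 2" situation,
            // and the write continues on the single live replica.
            System.out.println("Continuing with single-replica pipeline: [" + LOCAL + "]");
            return;
          }
          if (DEAD.contains(mirror)) {
            // Mirrors the "Exception in createBlockOutputStream" -> "Abandoning blk_..."
            // -> "Excluding datanode ..." sequence in the log.
            System.out.println("Abandoning block, excluding " + mirror);
            excluded.add(mirror);
          } else {
            System.out.println("Pipeline established: [" + LOCAL + ", " + mirror + "]");
            return;
          }
        }
      }

      private static String pickMirror(List<String> excluded) {
        return MIRROR_CANDIDATES.stream()
            .filter(m -> !excluded.contains(m))
            .findFirst()
            .orElse(null);
      }
    }
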
2024-11-20T23:37:42,834 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42350 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741851_1034 to mirror 127.0.0.1:40329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:42,835 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 2024-11-20T23:37:42,835 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741851_1034 2024-11-20T23:37:42,835 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42350 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:37:42,835 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42350 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42350 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:42,835 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:37:42,838 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45219 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:42,838 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42362 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741852_1035 to mirror 127.0.0.1:45219 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T23:37:42,839 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 2024-11-20T23:37:42,839 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741852_1035 2024-11-20T23:37:42,839 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42362 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:37:42,839 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42362 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42362 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T23:37:42,839 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK] 2024-11-20T23:37:42,840 WARN [IPC Server handler 3 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T23:37:42,840 WARN [IPC Server handler 3 on default port 44951 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T23:37:42,841 WARN [IPC Server handler 3 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T23:37:42,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741853_1036 (size=12506) 2024-11-20T23:37:43,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/eb906a1f180a45ef9ad3383f83e8e303 2024-11-20T23:37:43,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/eb906a1f180a45ef9ad3383f83e8e303 as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/eb906a1f180a45ef9ad3383f83e8e303 2024-11-20T23:37:43,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/eb906a1f180a45ef9ad3383f83e8e303, entries=7, sequenceid=24, filesize=12.2 K 2024-11-20T23:37:43,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 0c5970216248783cd04249080d657bfa in 445ms, sequenceid=24, compaction requested=false 2024-11-20T23:37:43,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0c5970216248783cd04249080d657bfa: 2024-11-20T23:37:43,264 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-20T23:37:43,264 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:37:43,264 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/eb906a1f180a45ef9ad3383f83e8e303 because midkey is the same as first or last row 2024-11-20T23:37:43,581 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,072 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,073 WARN [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]] 2024-11-20T23:37:44,073 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C40151%2C1732145842087:(num 1732145860047) roll requested 2024-11-20T23:37:44,073 INFO [regionserver/412a5e44fd2e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C40151%2C1732145842087.1732145864073 2024-11-20T23:37:44,077 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,077 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 2024-11-20T23:37:44,077 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741854_1037 2024-11-20T23:37:44,078 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:37:44,079 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,079 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 2024-11-20T23:37:44,079 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741855_1038 2024-11-20T23:37:44,080 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK] 2024-11-20T23:37:44,082 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,082 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 2024-11-20T23:37:44,082 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741856_1039 2024-11-20T23:37:44,083 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:37:44,086 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38187 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,086 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42380 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741857_1040 to mirror 127.0.0.1:38187 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:44,086 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 2024-11-20T23:37:44,086 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741857_1040 2024-11-20T23:37:44,087 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42380 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T23:37:44,087 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42380 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42380 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T23:37:44,087 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:37:44,088 WARN [IPC Server handler 4 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T23:37:44,088 WARN [IPC Server handler 4 on default port 44951 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T23:37:44,088 WARN [IPC Server handler 4 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T23:37:44,092 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:44,092 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:44,092 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:44,092 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:44,092 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:44,092 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145860047 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145864073 2024-11-20T23:37:44,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741843_1026 (size=25992) 2024-11-20T23:37:44,099 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44775:44775)] 2024-11-20T23:37:44,099 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 is not closed yet, will try archiving it next time 2024-11-20T23:37:44,099 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145860047 is not closed yet, will try archiving it next time 2024-11-20T23:37:44,100 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145856019 to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/oldWALs/412a5e44fd2e%2C40151%2C1732145842087.1732145856019 2024-11-20T23:37:44,226 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40151 {}] regionserver.HRegion(8855): Flush requested on 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:44,241 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0c5970216248783cd04249080d657bfa 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-20T23:37:44,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/87cf9c50fa9a42b98253d1288aca3ef1 is 1079, key is tmprow/info:/1732145864239/Put/seqid=0 2024-11-20T23:37:44,248 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,248 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 
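
The log-roller records above are the behaviour this test (TestLogRolling-testLogRollOnDatanodeDeath) exists to exercise: once FSHLog sees "Found 1 replicas but expecting no less than 2 replicas" it requests a roll, a new WAL file (...1732145864073) is opened on the one reachable pipeline, and an older file is archived to oldWALs. The same close-and-roll transition can also be requested from a client through the Admin API; the sketch below is an illustration only (invented class name, assumes a reachable cluster, reuses the table and column family names from the log), not part of the test.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WalRollSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(name)) {

          // Write a few edits so the current WAL has something worth rolling.
          for (int i = 0; i < 10; i++) {
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), new byte[1024]);
            table.put(put);
          }

          // Ask each live region server to close its current WAL and open a new one,
          // the same close/roll/archive sequence the log shows happening automatically
          // when the pipeline drops below the expected replica count.
          for (ServerName server : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
            admin.rollWALWriter(server);
          }
        }
      }
    }
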
2024-11-20T23:37:44,248 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741859_1042 2024-11-20T23:37:44,249 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:37:44,251 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42396 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741860_1043 to mirror 127.0.0.1:41443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:44,252 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 
2024-11-20T23:37:44,252 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741860_1043 2024-11-20T23:37:44,252 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42396 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:37:44,252 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42396 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42396 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:44,252 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:37:44,254 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,254 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 
2024-11-20T23:37:44,254 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741861_1044 2024-11-20T23:37:44,255 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:37:44,258 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45219 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42410 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741862_1045 to mirror 127.0.0.1:45219 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:44,258 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 
2024-11-20T23:37:44,258 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42410 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:37:44,258 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741862_1045 2024-11-20T23:37:44,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42410 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42410 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
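
Despite all of those exclusions, the write ultimately lands on the single datanode that is still alive, which is why the records that follow show "addStoredBlock: 127.0.0.1:38539 is added to blk_1073741863_1046" and the flush finishing with a 5.9 K HFile. To confirm how many replicas a file actually ended up with after a run like this, a generic check looks like the sketch below (invented class name; the path is an assumed command-line argument, not taken from the log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ReplicaCheckSketch {
      public static void main(String[] args) throws Exception {
        // Assumed argument; in the log the flushed HFiles live under
        // .../data/default/TestLogRolling-testLogRollOnDatanodeDeath/<region>/info/.
        Path hfile = new Path(args[0]);
        try (FileSystem fs = FileSystem.get(new Configuration())) {
          FileStatus status = fs.getFileStatus(hfile);
          System.out.println("requested replication = " + status.getReplication());
          for (BlockLocation block : fs.getFileBlockLocations(status, 0, status.getLen())) {
            // With only one live datanode this prints a single host per block,
            // even though dfs.replication asked for 2.
            System.out.println("block hosts = " + String.join(",", block.getHosts()));
          }
        }
      }
    }
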
2024-11-20T23:37:44,259 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK] 2024-11-20T23:37:44,260 WARN [IPC Server handler 3 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T23:37:44,260 WARN [IPC Server handler 3 on default port 44951 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T23:37:44,260 WARN [IPC Server handler 3 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T23:37:44,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741863_1046 (size=6027) 2024-11-20T23:37:44,495 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 is not closed yet, will try archiving it next time 2024-11-20T23:37:44,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/87cf9c50fa9a42b98253d1288aca3ef1 2024-11-20T23:37:44,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/87cf9c50fa9a42b98253d1288aca3ef1 as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/87cf9c50fa9a42b98253d1288aca3ef1 2024-11-20T23:37:44,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/87cf9c50fa9a42b98253d1288aca3ef1, entries=1, sequenceid=34, filesize=5.9 K 2024-11-20T23:37:44,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 0c5970216248783cd04249080d657bfa in 447ms, 
sequenceid=34, compaction requested=true 2024-11-20T23:37:44,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0c5970216248783cd04249080d657bfa: 2024-11-20T23:37:44,688 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-20T23:37:44,688 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:37:44,688 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/eb906a1f180a45ef9ad3383f83e8e303 because midkey is the same as first or last row 2024-11-20T23:37:44,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0c5970216248783cd04249080d657bfa:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T23:37:44,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:37:44,689 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:37:44,692 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T23:37:44,692 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HStore(1541): 0c5970216248783cd04249080d657bfa/info is initiating minor compaction (all files) 2024-11-20T23:37:44,692 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0c5970216248783cd04249080d657bfa/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 
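The split-policy lines above combine two checks: the summed store file size must exceed a threshold (here 28.2 K against a 16.0 K check size), and the proposed split point, taken from the midkey of the largest file, must differ from both the first and last row, otherwise the region "cannot split". A minimal standalone sketch of that decision, using hypothetical types and illustrative row keys rather than the real HBase split-policy classes:

import java.util.List;

// Hypothetical, simplified model of the split decision seen in the log:
// a size check first, then a sanity check on the proposed split point.
public class SplitCheckSketch {

    record StoreFileInfo(long sizeBytes, String firstRow, String midKey, String lastRow) {}

    // The region is "big enough" when the summed store file size exceeds the threshold.
    static boolean bigEnough(List<StoreFileInfo> files, long sizeToCheckBytes) {
        long sum = files.stream().mapToLong(StoreFileInfo::sizeBytes).sum();
        return sum > sizeToCheckBytes;
    }

    // A split point is unusable when the midkey equals the first or last row, which is
    // what "cannot split ... because midkey is the same as first or last row" reports.
    static boolean usableSplitPoint(StoreFileInfo largest) {
        return !largest.midKey().equals(largest.firstRow())
            && !largest.midKey().equals(largest.lastRow());
    }

    public static void main(String[] args) {
        // Sizes roughly mirroring the log (about 28.2 KB of store files vs a 16 KB check
        // size); the row keys are placeholders chosen so the midkey check fails.
        List<StoreFileInfo> files = List.of(
            new StoreFileInfo(10_342, "row0002", "row0002", "row0006"),
            new StoreFileInfo(12_494, "row0007", "row0007", "row0013"),
            new StoreFileInfo(6_027,  "row0014", "row0014", "row0014"));
        long sizeToCheck = 16_384;

        StoreFileInfo largest = files.get(1);
        System.out.println("big enough: " + bigEnough(files, sizeToCheck));
        System.out.println("usable split point: " + usableSplitPoint(largest));
    }
}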
2024-11-20T23:37:44,692 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/a0f58ded84cf4678a84b6f37a7188e82, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/eb906a1f180a45ef9ad3383f83e8e303, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/87cf9c50fa9a42b98253d1288aca3ef1] into tmpdir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp, totalSize=28.2 K 2024-11-20T23:37:44,693 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.Compactor(225): Compacting a0f58ded84cf4678a84b6f37a7188e82, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732145858111 2024-11-20T23:37:44,694 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.Compactor(225): Compacting eb906a1f180a45ef9ad3383f83e8e303, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732145862188 2024-11-20T23:37:44,694 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.Compactor(225): Compacting 87cf9c50fa9a42b98253d1288aca3ef1, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732145864239 2024-11-20T23:37:44,717 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0c5970216248783cd04249080d657bfa#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:37:44,718 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/15fd40aa2918406888051ee19c194c2d is 1080, key is row0002/info:/1732145858111/Put/seqid=0 2024-11-20T23:37:44,721 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
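The PressureAwareThroughputController line above reports the compaction writer being measured against a global limit: the rewrite averaged about 6.16 MB/second against a 50 MB/second cap, so it never had to sleep. A hedged sketch of that style of throttling, with made-up class and method names rather than the real controller API:

// Hypothetical rate limiter illustrating the idea behind the "slept N time(s)" log line:
// the writer reports how many bytes it produced and is put to sleep whenever the
// observed rate would exceed the configured limit.
public class ThroughputThrottleSketch {

    private final double limitBytesPerSec;
    private long bytesSoFar = 0;
    private final long startNanos = System.nanoTime();
    private long sleeps = 0;
    private long totalSleptMs = 0;

    ThroughputThrottleSketch(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    // Called after writing a chunk; sleeps only if we are running ahead of the limit.
    void control(long bytesJustWritten) throws InterruptedException {
        bytesSoFar += bytesJustWritten;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minSecondsForBytes = bytesSoFar / limitBytesPerSec;
        long sleepMs = (long) ((minSecondsForBytes - elapsedSec) * 1000);
        if (sleepMs > 0) {
            sleeps++;
            totalSleptMs += sleepMs;
            Thread.sleep(sleepMs);
        }
    }

    String summary() {
        double elapsedSec = Math.max((System.nanoTime() - startNanos) / 1e9, 1e-9);
        double mbPerSec = bytesSoFar / elapsedSec / (1024 * 1024);
        return String.format("average throughput is %.2f MB/second, slept %d time(s),"
            + " total slept time is %d ms", mbPerSec, sleeps, totalSleptMs);
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/s limit as in the log; a ~28 KB compaction never comes close to it.
        ThroughputThrottleSketch t = new ThroughputThrottleSketch(50 * 1024 * 1024);
        for (int i = 0; i < 28; i++) {
            Thread.sleep(1);   // pretend each 1 KB chunk takes a moment to produce
            t.control(1024);
        }
        System.out.println(t.summary());
    }
}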
2024-11-20T23:37:44,721 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK], DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 2024-11-20T23:37:44,721 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741864_1047 2024-11-20T23:37:44,722 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:37:44,723 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,724 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 2024-11-20T23:37:44,724 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741865_1048 2024-11-20T23:37:44,724 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK] 2024-11-20T23:37:44,726 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,726 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 2024-11-20T23:37:44,726 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741866_1049 2024-11-20T23:37:44,727 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:37:44,728 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:44,728 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 
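The repeating pattern in the DataStreamer warnings above is abandon, exclude, retry: each time the first node of the proposed pipeline refuses the connection, the client abandons that block, adds the node to its excluded set, and asks for a fresh block whose pipeline avoids everything excluded so far. A self-contained sketch of that loop, with a stand-in allocator and hard-coded "dead" nodes taken from the addresses in the log (the shape of the loop is an illustration, not the HDFS client code):

import java.util.*;

// Hypothetical sketch of the abandon/exclude/retry loop visible in the DataStreamer
// warnings. Node lists and the allocator are stand-ins, not the HDFS client API.
public class PipelineRetrySketch {

    // Datanodes the "cluster" knows about; only the last one is reachable, mirroring a
    // test where the other datanodes have been shut down.
    static final List<String> ALL = List.of("127.0.0.1:40329", "127.0.0.1:45219",
                                            "127.0.0.1:38187", "127.0.0.1:41443",
                                            "127.0.0.1:38539");
    static final Set<String> DEAD = Set.of("127.0.0.1:40329", "127.0.0.1:45219",
                                           "127.0.0.1:38187", "127.0.0.1:41443");

    // Pretend block allocation: pick up to `replication` nodes not yet excluded.
    static List<String> allocatePipeline(Set<String> excluded, int replication) {
        List<String> pipeline = new ArrayList<>();
        for (String dn : ALL) {
            if (pipeline.size() == replication) break;
            if (!excluded.contains(dn)) pipeline.add(dn);
        }
        return pipeline;
    }

    public static void main(String[] args) {
        Set<String> excluded = new HashSet<>();
        long blockId = 1073741864L;
        while (true) {
            List<String> pipeline = allocatePipeline(excluded, 2);
            if (pipeline.isEmpty()) {
                System.out.println("no datanodes left to try, giving up");
                return;
            }
            // "Connect" to the first node of the pipeline; dead nodes refuse the connection.
            String first = pipeline.get(0);
            if (DEAD.contains(first)) {
                System.out.println("Abandoning blk_" + blockId + ": datanode " + first + " is bad");
                excluded.add(first);   // exclude it from the next allocation
                blockId++;             // and ask for a brand-new block
                continue;
            }
            System.out.println("pipeline established on " + pipeline);
            return;
        }
    }
}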
2024-11-20T23:37:44,728 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741867_1050 2024-11-20T23:37:44,729 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:37:44,730 WARN [IPC Server handler 2 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T23:37:44,730 WARN [IPC Server handler 2 on default port 44951 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T23:37:44,730 WARN [IPC Server handler 2 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T23:37:44,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741868_1051 (size=17994) 2024-11-20T23:37:44,959 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@37279861[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356):Failed to transfer BP-373955406-172.17.0.2-1732145839356:blk_1073741848_1031 to 127.0.0.1:38187 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
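The "Failed to place enough replicas, still in need of 1 to reach 2" warnings above come from the namenode side of the same story: with most datanodes excluded or down, the placement policy runs out of usable DISK storages before it reaches the requested replication and logs the shortfall. A minimal sketch of that check, using a plain list of live nodes instead of the real block-placement machinery:

import java.util.*;

// Hypothetical sketch of the replica-placement shortfall reported in the log: targets
// are chosen until either the requested replication is met or the candidates run out,
// and anything still missing is reported as a warning.
public class PlacementShortfallSketch {

    static List<String> chooseTargets(int replication, List<String> liveDatanodes,
                                      Set<String> excluded) {
        List<String> chosen = new ArrayList<>();
        for (String dn : liveDatanodes) {
            if (chosen.size() == replication) break;
            if (!excluded.contains(dn)) chosen.add(dn);
        }
        if (chosen.size() < replication) {
            System.out.printf("WARN Failed to place enough replicas, still in need of %d to reach %d%n",
                replication - chosen.size(), replication);
        }
        return chosen;
    }

    public static void main(String[] args) {
        // One live datanode and replication factor 2: the policy can only return a
        // single target, matching the situation in the log.
        List<String> live = List.of("127.0.0.1:38539");
        System.out.println("chosen: " + chooseTargets(2, live, Set.of()));
    }
}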
2024-11-20T23:37:44,959 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@33180a5d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356):Failed to transfer BP-373955406-172.17.0.2-1732145839356:blk_1073741853_1036 to 127.0.0.1:40329 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:45,147 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/15fd40aa2918406888051ee19c194c2d as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/15fd40aa2918406888051ee19c194c2d 2024-11-20T23:37:45,156 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0c5970216248783cd04249080d657bfa/info of 0c5970216248783cd04249080d657bfa into 15fd40aa2918406888051ee19c194c2d(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
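The "Committing .../.tmp/info/<file> as .../info/<file>" and "Completed compaction of 3 (all) file(s) ... into 15fd40aa..." lines above show the two-step publish of the compacted output: it is written under the region's .tmp directory and only becomes visible to readers when it is moved into the column family directory. A local-filesystem analogy of that step (an assumption for illustration, the real path is on HDFS):

import java.io.IOException;
import java.nio.file.*;

// Local-filesystem analogy of the ".tmp/info/<file> as info/<file>" commit: the
// compacted output is written where readers never look, then renamed into the store.
public class CommitByRenameSketch {
    public static void main(String[] args) throws IOException {
        Path store = Files.createTempDirectory("region-info-");
        Path tmpDir = Files.createDirectories(store.resolve(".tmp"));
        Path familyDir = Files.createDirectories(store.resolve("info"));

        // Step 1: write the compacted file under .tmp.
        Path tmpFile = tmpDir.resolve("15fd40aa2918406888051ee19c194c2d");
        Files.writeString(tmpFile, "compacted hfile contents (placeholder)");

        // Step 2: commit by moving it into the column family directory in one step.
        Path committed = Files.move(tmpFile, familyDir.resolve(tmpFile.getFileName()),
            StandardCopyOption.ATOMIC_MOVE);
        System.out.println("Committed " + tmpFile + " as " + committed);
    }
}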
2024-11-20T23:37:45,156 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0c5970216248783cd04249080d657bfa: 2024-11-20T23:37:45,156 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa., storeName=0c5970216248783cd04249080d657bfa/info, priority=13, startTime=1732145864688; duration=0sec 2024-11-20T23:37:45,156 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-20T23:37:45,156 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:37:45,156 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/15fd40aa2918406888051ee19c194c2d because midkey is the same as first or last row 2024-11-20T23:37:45,156 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-20T23:37:45,156 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:37:45,156 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/15fd40aa2918406888051ee19c194c2d because midkey is the same as first or last row 2024-11-20T23:37:45,156 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-20T23:37:45,157 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:37:45,157 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/15fd40aa2918406888051ee19c194c2d because midkey is the same as first or last row 2024-11-20T23:37:45,157 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:37:45,157 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0c5970216248783cd04249080d657bfa:info 2024-11-20T23:37:45,582 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:45,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40151 {}] regionserver.HRegion(8855): Flush requested on 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:45,661 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0c5970216248783cd04249080d657bfa 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-20T23:37:45,668 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/56824b1c39f04367ac40ac3f16e89dc2 is 1079, key is tmprow/info:/1732145865660/Put/seqid=0 2024-11-20T23:37:45,670 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:45,671 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 2024-11-20T23:37:45,671 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741869_1052 2024-11-20T23:37:45,671 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK] 2024-11-20T23:37:45,674 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40329 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:45,674 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42454 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741870_1053 to mirror 127.0.0.1:40329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:45,675 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 2024-11-20T23:37:45,675 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741870_1053 2024-11-20T23:37:45,675 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42454 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:37:45,675 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42454 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42454 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:45,675 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:37:45,678 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:45,678 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42462 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741871_1054 to mirror 127.0.0.1:41443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:45,678 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 2024-11-20T23:37:45,678 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741871_1054 2024-11-20T23:37:45,679 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42462 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:37:45,679 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42462 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42462 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:45,679 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:37:45,682 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38187 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:45,682 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42470 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741872_1055 to mirror 127.0.0.1:38187 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:45,682 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 2024-11-20T23:37:45,682 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741872_1055 2024-11-20T23:37:45,682 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42470 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:37:45,682 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42470 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42470 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:45,683 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:37:45,684 WARN [IPC Server handler 1 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T23:37:45,684 WARN [IPC Server handler 1 on default port 44951 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T23:37:45,684 WARN [IPC Server handler 1 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T23:37:45,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741873_1056 (size=6027) 2024-11-20T23:37:45,960 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@37279861[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356):Failed to transfer BP-373955406-172.17.0.2-1732145839356:blk_1073741863_1046 to 127.0.0.1:40329 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:45,960 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@33180a5d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356):Failed to transfer BP-373955406-172.17.0.2-1732145839356:blk_1073741843_1026 to 127.0.0.1:40329 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:46,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/56824b1c39f04367ac40ac3f16e89dc2 2024-11-20T23:37:46,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/56824b1c39f04367ac40ac3f16e89dc2 as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/56824b1c39f04367ac40ac3f16e89dc2 2024-11-20T23:37:46,100 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:46,100 WARN [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. 
Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]] 2024-11-20T23:37:46,100 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C40151%2C1732145842087:(num 1732145864073) roll requested 2024-11-20T23:37:46,100 INFO [regionserver/412a5e44fd2e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C40151%2C1732145842087.1732145866100 2024-11-20T23:37:46,103 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:46,103 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK], DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 
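The FSHLog lines above ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL" followed by "roll requested" and a new WAL name with a fresh timestamp) show the trigger for the roll: after a pipeline error the writer inspects how many datanodes remain in the current pipeline and, if the count is below the configured minimum, closes the current WAL and opens a new one. A short sketch of that check with made-up names:

// Hypothetical sketch of the low-replication check behind the WAL roll request: after
// a sync the writer looks at the surviving pipeline and asks for a roll when it has
// shrunk below the configured minimum.
public class WalRollOnLowReplicationSketch {

    static boolean needsRoll(String[] currentPipeline, int minReplicas) {
        return currentPipeline.length < minReplicas;
    }

    public static void main(String[] args) {
        String[] pipeline = { "127.0.0.1:38539" };   // only one datanode left, as in the log
        int minReplicas = 2;
        if (needsRoll(pipeline, minReplicas)) {
            // A rolled WAL gets a fresh timestamp suffix; the old file is closed and
            // archived later, once it is no longer needed.
            String newWal = "412a5e44fd2e%2C40151%2C1732145842087." + System.currentTimeMillis();
            System.out.println("Found " + pipeline.length + " replicas but expecting no less than "
                + minReplicas + " replicas. Requesting close of WAL; new WAL: " + newWal);
        }
    }
}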
2024-11-20T23:37:46,103 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741874_1057 2024-11-20T23:37:46,103 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/56824b1c39f04367ac40ac3f16e89dc2, entries=1, sequenceid=45, filesize=5.9 K 2024-11-20T23:37:46,104 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:37:46,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 0c5970216248783cd04249080d657bfa in 444ms, sequenceid=45, compaction requested=false 2024-11-20T23:37:46,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0c5970216248783cd04249080d657bfa: 2024-11-20T23:37:46,105 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-20T23:37:46,105 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:37:46,105 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/15fd40aa2918406888051ee19c194c2d because midkey is the same as first or last row 2024-11-20T23:37:46,105 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:46,106 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 
2024-11-20T23:37:46,106 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741875_1058 2024-11-20T23:37:46,106 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:37:46,109 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38187 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:46,109 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42490 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741876_1059 to mirror 127.0.0.1:38187 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:46,109 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 
2024-11-20T23:37:46,109 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741876_1059 2024-11-20T23:37:46,109 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42490 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T23:37:46,109 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42490 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42490 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:46,110 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:37:46,112 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45219 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
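The "ack with firstBadLink as ..." errors paired with the datanode-side "Exception transferring block ... to mirror ..." messages above illustrate how a failure in the middle of the pipeline is reported: the first datanode accepts the client's connection, fails to reach its downstream mirror, and sends the unreachable address back in its ack, so the client knows exactly which node to exclude before retrying with a new block. A compact sketch of that exchange, simulated in-process with stand-in addresses:

import java.util.*;

// Hypothetical sketch of the "ack with firstBadLink" exchange: the head of the write
// pipeline reports the first downstream mirror it could not reach, and the client uses
// that address to mark the right datanode as bad.
public class FirstBadLinkSketch {

    // Simulated ack from the head of the pipeline: an empty string means success,
    // otherwise the address of the first downstream node it could not connect to.
    static String setUpDownstream(List<String> pipeline, Set<String> dead) {
        for (int i = 1; i < pipeline.size(); i++) {   // index 0 is the node we connected to
            if (dead.contains(pipeline.get(i))) return pipeline.get(i);
        }
        return "";
    }

    public static void main(String[] args) {
        List<String> pipeline = List.of("127.0.0.1:38539", "127.0.0.1:40329");
        Set<String> dead = Set.of("127.0.0.1:40329");

        String firstBadLink = setUpDownstream(pipeline, dead);
        if (!firstBadLink.isEmpty()) {
            int badIndex = pipeline.indexOf(firstBadLink);
            System.out.println("ack with firstBadLink as " + firstBadLink
                + "; datanode " + badIndex + " is bad, abandoning block and excluding it");
        }
    }
}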
2024-11-20T23:37:46,112 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42494 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741877_1060 to mirror 127.0.0.1:45219 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:46,113 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 2024-11-20T23:37:46,113 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741877_1060 2024-11-20T23:37:46,113 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42494 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T23:37:46,113 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:42494 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42494 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:46,113 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK] 2024-11-20T23:37:46,114 WARN [IPC Server handler 1 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T23:37:46,114 WARN [IPC Server handler 1 on default port 44951 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T23:37:46,114 WARN [IPC Server handler 1 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T23:37:46,119 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:46,119 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:46,119 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:46,120 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:46,120 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:46,120 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145864073 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145866100 2024-11-20T23:37:46,121 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44775:44775)] 2024-11-20T23:37:46,121 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 is not closed yet, will try archiving it next time 
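The BlockPlacementPolicyDefault warnings above point at the next diagnostic step themselves: enable DEBUG on `org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy` and `org.apache.hadoop.net.NetworkTopology` to see which datanodes were rejected and why. With Log4j 2 that can be done programmatically; a minimal sketch, assuming log4j-core is on the classpath (in this mini-cluster the NameNode runs in the test JVM, so raising the level locally is enough):

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class EnablePlacementDebug {
  public static void main(String[] args) {
    // Raise the two loggers named in the warning so the NameNode explains
    // each rejected replica target instead of only the summary line.
    Configurator.setLevel(
        "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
    Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
  }
}
```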
2024-11-20T23:37:46,121 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145864073 is not closed yet, will try archiving it next time 2024-11-20T23:37:46,121 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145860047 to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/oldWALs/412a5e44fd2e%2C40151%2C1732145842087.1732145860047 2024-11-20T23:37:46,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741858_1041 (size=13591) 2024-11-20T23:37:46,227 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:46,523 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 is not closed yet, will try archiving it next time 2024-11-20T23:37:47,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40151 {}] regionserver.HRegion(8855): Flush requested on 0c5970216248783cd04249080d657bfa 2024-11-20T23:37:47,082 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0c5970216248783cd04249080d657bfa 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-20T23:37:47,088 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/080c19d626a2455599f51fbe5a4f8a0b is 1079, key is tmprow/info:/1732145867081/Put/seqid=0 2024-11-20T23:37:47,090 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:47,091 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 2024-11-20T23:37:47,091 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741879_1062 2024-11-20T23:37:47,092 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK] 2024-11-20T23:37:47,093 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:47,093 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK], DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 2024-11-20T23:37:47,093 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741880_1063 2024-11-20T23:37:47,094 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:37:47,095 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:47,096 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 2024-11-20T23:37:47,096 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741881_1064 2024-11-20T23:37:47,096 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:37:47,098 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:47,098 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 
2024-11-20T23:37:47,098 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741882_1065 2024-11-20T23:37:47,099 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:37:47,099 WARN [IPC Server handler 1 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T23:37:47,099 WARN [IPC Server handler 1 on default port 44951 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T23:37:47,100 WARN [IPC Server handler 1 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T23:37:47,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741883_1066 (size=6027) 2024-11-20T23:37:47,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/080c19d626a2455599f51fbe5a4f8a0b 2024-11-20T23:37:47,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/080c19d626a2455599f51fbe5a4f8a0b as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/080c19d626a2455599f51fbe5a4f8a0b 2024-11-20T23:37:47,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/080c19d626a2455599f51fbe5a4f8a0b, entries=1, sequenceid=55, filesize=5.9 K 2024-11-20T23:37:47,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 0c5970216248783cd04249080d657bfa in 441ms, sequenceid=55, compaction requested=true 2024-11-20T23:37:47,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
0c5970216248783cd04249080d657bfa: 2024-11-20T23:37:47,523 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-20T23:37:47,523 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:37:47,523 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/15fd40aa2918406888051ee19c194c2d because midkey is the same as first or last row 2024-11-20T23:37:47,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0c5970216248783cd04249080d657bfa:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T23:37:47,524 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:37:47,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:37:47,525 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T23:37:47,525 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HStore(1541): 0c5970216248783cd04249080d657bfa/info is initiating minor compaction (all files) 2024-11-20T23:37:47,525 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0c5970216248783cd04249080d657bfa/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 
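The split-policy lines above show the store being re-evaluated after the flush: the region exceeds the tiny test threshold (sumSize=29.3 K vs sizeToCheck=16.0 K) but the split is refused because the midkey equals the first or last row, so a minor compaction of the three store files is queued instead. The size threshold and the policy class are per-table settings; the following is a hedged sketch of creating a table with an explicit max file size and split policy. The table and family names are reused from this log, the 16 KB value is purely illustrative, and this is not how the test itself sets things up.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableWithSplitPolicy {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
      admin.createTable(TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          // Regions whose stores grow past this size become split candidates.
          .setMaxFileSize(16 * 1024)
          .setRegionSplitPolicyClassName(
              "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
          .build());
    }
  }
}
```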
2024-11-20T23:37:47,525 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/15fd40aa2918406888051ee19c194c2d, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/56824b1c39f04367ac40ac3f16e89dc2, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/080c19d626a2455599f51fbe5a4f8a0b] into tmpdir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp, totalSize=29.3 K 2024-11-20T23:37:47,526 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.Compactor(225): Compacting 15fd40aa2918406888051ee19c194c2d, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732145858111 2024-11-20T23:37:47,527 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.Compactor(225): Compacting 56824b1c39f04367ac40ac3f16e89dc2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732145865660 2024-11-20T23:37:47,527 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.Compactor(225): Compacting 080c19d626a2455599f51fbe5a4f8a0b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732145867081 2024-11-20T23:37:47,544 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0c5970216248783cd04249080d657bfa#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:37:47,544 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/c9c86ce575054c0899209ade4b5a436e is 1080, key is row0002/info:/1732145858111/Put/seqid=0 2024-11-20T23:37:47,547 WARN [Thread-976 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:37:47,547 WARN [Thread-976 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 2024-11-20T23:37:47,547 WARN [Thread-976 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741884_1067 2024-11-20T23:37:47,548 WARN [Thread-976 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:37:47,549 WARN [Thread-976 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:47,549 WARN [Thread-976 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 2024-11-20T23:37:47,549 WARN [Thread-976 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741885_1068 2024-11-20T23:37:47,550 WARN [Thread-976 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:37:47,551 WARN [Thread-976 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:47,552 WARN [Thread-976 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 2024-11-20T23:37:47,552 WARN [Thread-976 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741886_1069 2024-11-20T23:37:47,552 WARN [Thread-976 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:37:47,554 WARN [Thread-976 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:47,554 WARN [Thread-976 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]) is bad. 
2024-11-20T23:37:47,554 WARN [Thread-976 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741887_1070 2024-11-20T23:37:47,554 WARN [Thread-976 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45219,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK] 2024-11-20T23:37:47,555 WARN [IPC Server handler 1 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T23:37:47,555 WARN [IPC Server handler 1 on default port 44951 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T23:37:47,556 WARN [IPC Server handler 1 on default port 44951 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T23:37:47,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741888_1071 (size=18097) 2024-11-20T23:37:47,582 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:47,960 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@37279861[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356):Failed to transfer BP-373955406-172.17.0.2-1732145839356:blk_1073741873_1056 to 127.0.0.1:45219 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:47,960 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@33180a5d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356):Failed to transfer BP-373955406-172.17.0.2-1732145839356:blk_1073741868_1051 to 127.0.0.1:41443 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:47,967 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/c9c86ce575054c0899209ade4b5a436e as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/c9c86ce575054c0899209ade4b5a436e 2024-11-20T23:37:47,975 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0c5970216248783cd04249080d657bfa/info of 0c5970216248783cd04249080d657bfa into c9c86ce575054c0899209ade4b5a436e(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
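Despite the failed replica transfers, the compaction itself completes on the one reachable datanode: three store files (17.6 K + 5.9 K + 5.9 K) are rewritten into a single 17.7 K file. Flushes and compactions like these are normally scheduled by the region server, but a test or operator can force them through the Admin API; a minimal sketch, with the table name taken from this log and the connection details assumed:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ForceFlushAndCompact {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(table);        // persist the current memstore to a new HFile
      admin.majorCompact(table); // rewrite all store files into one per store
    }
  }
}
```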
2024-11-20T23:37:47,975 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0c5970216248783cd04249080d657bfa: 2024-11-20T23:37:47,975 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa., storeName=0c5970216248783cd04249080d657bfa/info, priority=13, startTime=1732145867523; duration=0sec 2024-11-20T23:37:47,975 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-20T23:37:47,975 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:37:47,976 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/c9c86ce575054c0899209ade4b5a436e because midkey is the same as first or last row 2024-11-20T23:37:47,976 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-20T23:37:47,976 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:37:47,976 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/c9c86ce575054c0899209ade4b5a436e because midkey is the same as first or last row 2024-11-20T23:37:47,976 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-20T23:37:47,976 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:37:47,976 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/c9c86ce575054c0899209ade4b5a436e because midkey is the same as first or last row 2024-11-20T23:37:47,976 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:37:47,976 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0c5970216248783cd04249080d657bfa:info 2024-11-20T23:37:48,121 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:48,121 WARN [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-20T23:37:48,227 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:48,308 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:37:48,312 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:37:48,313 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:37:48,313 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:37:48,313 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:37:48,314 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16f8dfc7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:37:48,314 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@441dcfc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:37:48,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6219e1b8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/java.io.tmpdir/jetty-localhost-33849-hadoop-hdfs-3_4_1-tests_jar-_-any-14665745471731106669/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:37:48,435 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@3e00c8cb{HTTP/1.1, (http/1.1)}{localhost:33849} 2024-11-20T23:37:48,435 INFO [Time-limited test {}] server.Server(415): Started @138635ms 2024-11-20T23:37:48,437 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:37:48,960 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@33180a5d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356):Failed to transfer BP-373955406-172.17.0.2-1732145839356:blk_1073741883_1066 to 127.0.0.1:45219 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:48,960 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@37279861[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356):Failed to transfer BP-373955406-172.17.0.2-1732145839356:blk_1073741858_1041 to 127.0.0.1:45219 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:49,562 WARN [Thread-994 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T23:37:49,572 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96c144701a59a789 with lease ID 0xab260f04e4f9a59: from storage DS-d630948f-c72f-4cdf-9283-16cc93b6fd03 node DatanodeRegistration(127.0.0.1:42179, datanodeUuid=1092f554-8ebb-4f08-bb75-2db829448461, infoPort=39973, infoSecurePort=0, ipcPort=37791, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:37:49,572 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96c144701a59a789 with lease ID 0xab260f04e4f9a59: from storage DS-47b2b73c-4eea-472e-8ed0-8595a5d752de node DatanodeRegistration(127.0.0.1:42179, datanodeUuid=1092f554-8ebb-4f08-bb75-2db829448461, infoPort=39973, infoSecurePort=0, ipcPort=37791, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:37:49,583 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:50,122 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:50,227 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
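The master:store-WAL-Roller and regionserver logRoller threads keep logging "All datanodes ... are bad" every couple of seconds because the old writers' pipelines still end at the dead datanode 127.0.0.1:38187; only after the restarted datanode (now registered at 127.0.0.1:42179, block report above) is usable can a roll build a healthy pipeline, as the master's roll further down eventually does. A WAL roll can also be requested explicitly for a given region server; a hedged sketch using the Admin API, with the server name copied from the WAL paths in this log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestWalRoll {
  public static void main(String[] args) throws Exception {
    // host,port,startcode as it appears in the WAL directory names above.
    ServerName rs = ServerName.valueOf("412a5e44fd2e,40151,1732145842087");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the region server to close its current WAL and open a new one.
      admin.rollWALWriter(rs);
    }
  }
}
```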
2024-11-20T23:37:50,960 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@33180a5d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356):Failed to transfer BP-373955406-172.17.0.2-1732145839356:blk_1073741888_1071 to 127.0.0.1:41443 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:37:51,583 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:51,868 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T23:37:52,122 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:52,228 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:52,477 ERROR [FSHLog-0-hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData-prefix:412a5e44fd2e,37185,1732145841890 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:52,478 WARN [FSHLog-0-hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData-prefix:412a5e44fd2e,37185,1732145841890 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:52,478 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C37185%2C1732145841890:(num 1732145842246) roll requested 2024-11-20T23:37:52,478 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C37185%2C1732145841890.1732145872478 2024-11-20T23:37:52,481 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:52,481 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 2024-11-20T23:37:52,481 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741889_1072 2024-11-20T23:37:52,482 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:37:52,484 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:52,484 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741890_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 
2024-11-20T23:37:52,484 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741890_1073 2024-11-20T23:37:52,484 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:37:52,489 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:52,489 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:52,489 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:52,489 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:52,490 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:37:52,490 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/WALs/412a5e44fd2e,37185,1732145841890/412a5e44fd2e%2C37185%2C1732145841890.1732145842246 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/WALs/412a5e44fd2e,37185,1732145841890/412a5e44fd2e%2C37185%2C1732145841890.1732145872478 2024-11-20T23:37:52,490 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:52,491 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:52,491 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/WALs/412a5e44fd2e,37185,1732145841890/412a5e44fd2e%2C37185%2C1732145841890.1732145842246 2024-11-20T23:37:52,491 WARN [IPC Server handler 2 on default port 44951 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/WALs/412a5e44fd2e,37185,1732145841890/412a5e44fd2e%2C37185%2C1732145841890.1732145842246 has not been closed. Lease recovery is in progress. 
RecoveryId = 1075 for block blk_1073741830_1006 2024-11-20T23:37:52,492 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/WALs/412a5e44fd2e,37185,1732145841890/412a5e44fd2e%2C37185%2C1732145841890.1732145842246 after 1ms 2024-11-20T23:37:52,492 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39973:39973),(127.0.0.1/127.0.0.1:44775:44775)] 2024-11-20T23:37:52,492 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/WALs/412a5e44fd2e,37185,1732145841890/412a5e44fd2e%2C37185%2C1732145841890.1732145842246 is not closed yet, will try archiving it next time 2024-11-20T23:37:53,583 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:54,122 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:55,584 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:37:56,123 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:56,493 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/WALs/412a5e44fd2e,37185,1732145841890/412a5e44fd2e%2C37185%2C1732145841890.1732145842246 after 4002ms 2024-11-20T23:37:57,584 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:58,123 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:59,585 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:37:59,590 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@47953c78 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-373955406-172.17.0.2-1732145839356:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:38187,null,null]) java.net.ConnectException: Call From 412a5e44fd2e/172.17.0.2 to localhost:44903 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-20T23:37:59,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741833_1020 (size=455) 2024-11-20T23:38:00,076 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/oldWALs/412a5e44fd2e%2C40151%2C1732145842087.1732145842724 2024-11-20T23:38:00,077 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145864073 to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/oldWALs/412a5e44fd2e%2C40151%2C1732145842087.1732145864073 2024-11-20T23:38:00,124 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:01,585 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:01,876 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C40151%2C1732145842087.1732145881876 2024-11-20T23:38:01,880 WARN [Thread-1023 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40329 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:01,880 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-488779230_22 at /127.0.0.1:58322 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741892_1076] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data4]'}, localName='127.0.0.1:42179', datanodeUuid='1092f554-8ebb-4f08-bb75-2db829448461', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741892_1076 to mirror 127.0.0.1:40329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:01,880 WARN [Thread-1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42179,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 
2024-11-20T23:38:01,880 WARN [Thread-1023 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741892_1076 2024-11-20T23:38:01,880 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-488779230_22 at /127.0.0.1:58322 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741892_1076] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T23:38:01,880 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-488779230_22 at /127.0.0.1:58322 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741892_1076] {}] datanode.DataXceiver(331): 127.0.0.1:42179:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58322 dst: /127.0.0.1:42179 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:01,881 WARN [Thread-1023 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:38:01,882 WARN [Thread-1023 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:01,883 WARN [Thread-1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 
2024-11-20T23:38:01,883 WARN [Thread-1023 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741893_1077 2024-11-20T23:38:01,883 WARN [Thread-1023 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:38:01,884 WARN [Thread-1023 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:01,885 WARN [Thread-1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 
2024-11-20T23:38:01,885 WARN [Thread-1023 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741894_1078 2024-11-20T23:38:01,885 WARN [Thread-1023 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:38:01,889 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:01,890 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:01,890 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:01,890 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:01,890 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:01,890 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145866100 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145881876 2024-11-20T23:38:01,891 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44775:44775),(127.0.0.1/127.0.0.1:39973:39973)] 2024-11-20T23:38:01,891 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.1732145866100 is not closed yet, will try archiving it next time 2024-11-20T23:38:01,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741878_1061 (size=12911) 2024-11-20T23:38:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40151 {}] regionserver.HRegion(8855): Flush requested on 0c5970216248783cd04249080d657bfa 2024-11-20T23:38:01,897 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0c5970216248783cd04249080d657bfa 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-20T23:38:01,903 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/48f1430d22bd43f188598f8b51b6c316 is 1080, key is row0013/info:/1732145881893/Put/seqid=0 2024-11-20T23:38:01,906 WARN [Thread-1030 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1080 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:01,906 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58336 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741896_1080] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data4]'}, localName='127.0.0.1:42179', datanodeUuid='1092f554-8ebb-4f08-bb75-2db829448461', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741896_1080 to mirror 127.0.0.1:41443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:01,906 WARN [Thread-1030 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741896_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42179,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 2024-11-20T23:38:01,907 WARN [Thread-1030 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741896_1080 2024-11-20T23:38:01,907 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58336 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741896_1080] {}] datanode.BlockReceiver(316): Block 1073741896 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:38:01,907 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58336 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741896_1080] {}] datanode.DataXceiver(331): 127.0.0.1:42179:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58336 dst: /127.0.0.1:42179 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:01,907 WARN [Thread-1030 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:38:01,909 WARN [Thread-1030 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:01,909 WARN [Thread-1030 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741897_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 2024-11-20T23:38:01,909 WARN [Thread-1030 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741897_1081 2024-11-20T23:38:01,910 WARN [Thread-1030 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:38:01,912 WARN [Thread-1030 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1082 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40329 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:01,912 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:59782 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741898_1082] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741898_1082 to mirror 127.0.0.1:40329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:01,913 WARN [Thread-1030 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741898_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 2024-11-20T23:38:01,913 WARN [Thread-1030 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741898_1082 2024-11-20T23:38:01,913 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:59782 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741898_1082] {}] datanode.BlockReceiver(316): Block 1073741898 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:38:01,913 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:59782 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741898_1082] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59782 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:01,913 WARN [Thread-1030 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:38:01,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741899_1083 (size=8190) 2024-11-20T23:38:01,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741899_1083 (size=8190) 2024-11-20T23:38:01,930 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/48f1430d22bd43f188598f8b51b6c316 2024-11-20T23:38:01,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/48f1430d22bd43f188598f8b51b6c316 as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/48f1430d22bd43f188598f8b51b6c316 2024-11-20T23:38:01,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/48f1430d22bd43f188598f8b51b6c316, entries=3, sequenceid=66, filesize=8.0 K 2024-11-20T23:38:01,945 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 0c5970216248783cd04249080d657bfa in 48ms, sequenceid=66, compaction requested=false 2024-11-20T23:38:01,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0c5970216248783cd04249080d657bfa: 2024-11-20T23:38:01,946 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-20T23:38:01,946 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:38:01,946 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/c9c86ce575054c0899209ade4b5a436e because midkey is the same as first or last row 2024-11-20T23:38:02,124 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(580): 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,124 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-20T23:38:02,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40151 {}] regionserver.HRegion(8855): Flush requested on 0c5970216248783cd04249080d657bfa 2024-11-20T23:38:02,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0c5970216248783cd04249080d657bfa 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-20T23:38:02,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/8004b3573f794a97bb34853eb1e8f6df is 1080, key is row0015/info:/1732145881899/Put/seqid=0 2024-11-20T23:38:02,134 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1084 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,134 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:59798 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741900_1084] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10]'}, localName='127.0.0.1:38539', datanodeUuid='21c61af8-ae4a-44e9-b5e1-cd0bccadef25', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741900_1084 to mirror 127.0.0.1:41443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:02,135 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741900_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK], DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 2024-11-20T23:38:02,135 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741900_1084 2024-11-20T23:38:02,135 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:59798 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741900_1084] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:38:02,135 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:59798 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741900_1084] {}] datanode.DataXceiver(331): 127.0.0.1:38539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59798 dst: /127.0.0.1:38539 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:02,135 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:38:02,137 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,137 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741901_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK], DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 2024-11-20T23:38:02,137 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741901_1085 2024-11-20T23:38:02,138 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:38:02,140 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,140 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741902_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:42179,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 
2024-11-20T23:38:02,140 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741902_1086 2024-11-20T23:38:02,141 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:38:02,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741903_1087 (size=14660) 2024-11-20T23:38:02,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741903_1087 (size=14660) 2024-11-20T23:38:02,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/8004b3573f794a97bb34853eb1e8f6df 2024-11-20T23:38:02,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/8004b3573f794a97bb34853eb1e8f6df as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/8004b3573f794a97bb34853eb1e8f6df 2024-11-20T23:38:02,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/8004b3573f794a97bb34853eb1e8f6df, entries=9, sequenceid=79, filesize=14.3 K 2024-11-20T23:38:02,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 0c5970216248783cd04249080d657bfa in 39ms, sequenceid=79, compaction requested=true 2024-11-20T23:38:02,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0c5970216248783cd04249080d657bfa: 2024-11-20T23:38:02,165 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-20T23:38:02,165 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:38:02,166 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/c9c86ce575054c0899209ade4b5a436e because midkey is the same as first or last row 2024-11-20T23:38:02,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0c5970216248783cd04249080d657bfa:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T23:38:02,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:38:02,166 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): 
Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:38:02,167 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T23:38:02,167 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HStore(1541): 0c5970216248783cd04249080d657bfa/info is initiating minor compaction (all files) 2024-11-20T23:38:02,168 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0c5970216248783cd04249080d657bfa/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 2024-11-20T23:38:02,168 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/c9c86ce575054c0899209ade4b5a436e, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/48f1430d22bd43f188598f8b51b6c316, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/8004b3573f794a97bb34853eb1e8f6df] into tmpdir=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp, totalSize=40.0 K 2024-11-20T23:38:02,168 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.Compactor(225): Compacting c9c86ce575054c0899209ade4b5a436e, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732145858111 2024-11-20T23:38:02,169 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.Compactor(225): Compacting 48f1430d22bd43f188598f8b51b6c316, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1732145868096 2024-11-20T23:38:02,170 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8004b3573f794a97bb34853eb1e8f6df, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732145881899 2024-11-20T23:38:02,187 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0c5970216248783cd04249080d657bfa#info#compaction#27 average throughput is 11.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:38:02,188 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/4ec79e56ab464745aff0f01d0efc5b04 is 1080, key is row0002/info:/1732145858111/Put/seqid=0 2024-11-20T23:38:02,193 WARN [Thread-1048 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1088 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38187 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,193 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58388 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741904_1088] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data4]'}, localName='127.0.0.1:42179', datanodeUuid='1092f554-8ebb-4f08-bb75-2db829448461', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741904_1088 to mirror 127.0.0.1:38187 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T23:38:02,193 WARN [Thread-1048 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741904_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42179,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 2024-11-20T23:38:02,193 WARN [Thread-1048 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741904_1088 2024-11-20T23:38:02,193 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58388 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741904_1088] {}] datanode.BlockReceiver(316): Block 1073741904 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:38:02,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58388 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741904_1088] {}] datanode.DataXceiver(331): 127.0.0.1:42179:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58388 dst: /127.0.0.1:42179 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:02,194 WARN [Thread-1048 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:38:02,195 WARN [Thread-1048 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:02,196 WARN [Thread-1048 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741905_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK], DatanodeInfoWithStorage[127.0.0.1:42179,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 2024-11-20T23:38:02,196 WARN [Thread-1048 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741905_1089 2024-11-20T23:38:02,196 WARN [Thread-1048 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:38:02,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741906_1090 (size=28989) 2024-11-20T23:38:02,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741906_1090 (size=28989) 2024-11-20T23:38:02,209 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/.tmp/info/4ec79e56ab464745aff0f01d0efc5b04 as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/4ec79e56ab464745aff0f01d0efc5b04 2024-11-20T23:38:02,218 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0c5970216248783cd04249080d657bfa/info of 0c5970216248783cd04249080d657bfa into 4ec79e56ab464745aff0f01d0efc5b04(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T23:38:02,218 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0c5970216248783cd04249080d657bfa: 2024-11-20T23:38:02,218 INFO [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa., storeName=0c5970216248783cd04249080d657bfa/info, priority=13, startTime=1732145882166; duration=0sec 2024-11-20T23:38:02,218 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-20T23:38:02,218 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:38:02,219 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/4ec79e56ab464745aff0f01d0efc5b04 because midkey is the same as first or last row 2024-11-20T23:38:02,219 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-20T23:38:02,219 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:38:02,219 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/4ec79e56ab464745aff0f01d0efc5b04 because midkey is the same as first or last row 2024-11-20T23:38:02,219 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-20T23:38:02,219 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:38:02,219 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/4ec79e56ab464745aff0f01d0efc5b04 because midkey is the same as first or last row 2024-11-20T23:38:02,219 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:38:02,219 DEBUG [RS:0;412a5e44fd2e:40151-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0c5970216248783cd04249080d657bfa:info 2024-11-20T23:38:02,327 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T23:38:02,327 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T23:38:02,327 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:38:02,327 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:02,327 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:02,327 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-20T23:38:02,328 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T23:38:02,328 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=86293236, stopped=false 2024-11-20T23:38:02,328 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=412a5e44fd2e,37185,1732145841890 2024-11-20T23:38:02,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:38:02,398 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1015a9c027a0002, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:38:02,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:38:02,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:02,399 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1015a9c027a0002, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:02,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:02,399 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:38:02,399 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T23:38:02,399 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:38:02,399 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:02,399 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '412a5e44fd2e,40151,1732145842087' ***** 2024-11-20T23:38:02,399 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T23:38:02,399 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '412a5e44fd2e,32927,1732145843473' ***** 2024-11-20T23:38:02,399 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T23:38:02,399 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T23:38:02,399 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T23:38:02,400 INFO [RS:1;412a5e44fd2e:32927 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T23:38:02,400 INFO [RS:1;412a5e44fd2e:32927 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T23:38:02,400 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.HRegionServer(959): stopping server 412a5e44fd2e,32927,1732145843473 2024-11-20T23:38:02,400 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:38:02,400 INFO [RS:0;412a5e44fd2e:40151 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T23:38:02,400 INFO [RS:1;412a5e44fd2e:32927 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;412a5e44fd2e:32927. 2024-11-20T23:38:02,400 INFO [RS:0;412a5e44fd2e:40151 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T23:38:02,400 DEBUG [RS:1;412a5e44fd2e:32927 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:38:02,400 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:38:02,400 DEBUG [RS:1;412a5e44fd2e:32927 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:02,400 INFO [RS:0;412a5e44fd2e:40151 {}] 
regionserver.HRegionServer(3091): Received CLOSE for 0c5970216248783cd04249080d657bfa 2024-11-20T23:38:02,400 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.HRegionServer(976): stopping server 412a5e44fd2e,32927,1732145843473; all regions closed. 2024-11-20T23:38:02,400 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32927-0x1015a9c027a0002, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:38:02,400 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T23:38:02,400 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T23:38:02,400 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:38:02,400 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(959): stopping server 412a5e44fd2e,40151,1732145842087 2024-11-20T23:38:02,400 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:38:02,400 INFO [RS:0;412a5e44fd2e:40151 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;412a5e44fd2e:40151. 2024-11-20T23:38:02,401 DEBUG [RS:0;412a5e44fd2e:40151 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:38:02,401 DEBUG [RS:0;412a5e44fd2e:40151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:02,401 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0c5970216248783cd04249080d657bfa, disabling compactions & flushes 2024-11-20T23:38:02,401 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T23:38:02,401 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 
2024-11-20T23:38:02,401 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T23:38:02,401 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 2024-11-20T23:38:02,401 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T23:38:02,401 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T23:38:02,401 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. after waiting 0 ms 2024-11-20T23:38:02,401 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 2024-11-20T23:38:02,401 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,401 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-20T23:38:02,401 DEBUG [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 0c5970216248783cd04249080d657bfa=TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.} 2024-11-20T23:38:02,401 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,401 DEBUG [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(1351): Waiting on 0c5970216248783cd04249080d657bfa, 1588230740 2024-11-20T23:38:02,401 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,401 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,402 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T23:38:02,402 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,402 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:38:02,402 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:38:02,402 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T23:38:02,402 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:38:02,402 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/a0f58ded84cf4678a84b6f37a7188e82, 
hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/eb906a1f180a45ef9ad3383f83e8e303, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/15fd40aa2918406888051ee19c194c2d, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/87cf9c50fa9a42b98253d1288aca3ef1, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/56824b1c39f04367ac40ac3f16e89dc2, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/c9c86ce575054c0899209ade4b5a436e, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/080c19d626a2455599f51fbe5a4f8a0b, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/48f1430d22bd43f188598f8b51b6c316, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/8004b3573f794a97bb34853eb1e8f6df] to archive 2024-11-20T23:38:02,402 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-20T23:38:02,402 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,402 ERROR [FSHLog-0-hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1-prefix:412a5e44fd2e,40151,1732145842087.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,402 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,402 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 2024-11-20T23:38:02,402 WARN [FSHLog-0-hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1-prefix:412a5e44fd2e,40151,1732145842087.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,403 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C40151%2C1732145842087.meta:.meta(num 1732145843179) roll requested 2024-11-20T23:38:02,403 WARN [IPC Server handler 0 on default port 44951 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 has not been closed. Lease recovery is in progress. RecoveryId = 1091 for block blk_1073741837_1013 2024-11-20T23:38:02,403 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T23:38:02,403 INFO [regionserver/412a5e44fd2e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C40151%2C1732145842087.meta.1732145882403.meta 2024-11-20T23:38:02,403 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 after 1ms 2024-11-20T23:38:02,406 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/a0f58ded84cf4678a84b6f37a7188e82 to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/a0f58ded84cf4678a84b6f37a7188e82 2024-11-20T23:38:02,407 WARN [Thread-1055 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1092 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40329 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,407 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58412 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741907_1092] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data4]'}, localName='127.0.0.1:42179', datanodeUuid='1092f554-8ebb-4f08-bb75-2db829448461', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741907_1092 to mirror 127.0.0.1:40329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:02,407 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/eb906a1f180a45ef9ad3383f83e8e303 to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/eb906a1f180a45ef9ad3383f83e8e303 2024-11-20T23:38:02,407 WARN [Thread-1055 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741907_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42179,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 2024-11-20T23:38:02,407 WARN [Thread-1055 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741907_1092 2024-11-20T23:38:02,407 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58412 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741907_1092] {}] datanode.BlockReceiver(316): Block 1073741907 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T23:38:02,408 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58412 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741907_1092] {}] datanode.DataXceiver(331): 127.0.0.1:42179:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58412 dst: /127.0.0.1:42179 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T23:38:02,408 WARN [Thread-1055 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:38:02,409 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/15fd40aa2918406888051ee19c194c2d to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/15fd40aa2918406888051ee19c194c2d 2024-11-20T23:38:02,409 WARN [Thread-1055 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741908_1093 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,409 WARN [Thread-1055 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741908_1093 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK], DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 
2024-11-20T23:38:02,409 WARN [Thread-1055 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741908_1093 2024-11-20T23:38:02,410 WARN [Thread-1055 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:38:02,411 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/87cf9c50fa9a42b98253d1288aca3ef1 to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/87cf9c50fa9a42b98253d1288aca3ef1 2024-11-20T23:38:02,412 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/56824b1c39f04367ac40ac3f16e89dc2 to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/56824b1c39f04367ac40ac3f16e89dc2 2024-11-20T23:38:02,414 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/c9c86ce575054c0899209ade4b5a436e to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/c9c86ce575054c0899209ade4b5a436e 2024-11-20T23:38:02,414 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,414 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,414 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,414 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,415 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,415 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145882403.meta 2024-11-20T23:38:02,415 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,415 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/080c19d626a2455599f51fbe5a4f8a0b to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/080c19d626a2455599f51fbe5a4f8a0b 2024-11-20T23:38:02,415 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,415 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta 2024-11-20T23:38:02,416 WARN [IPC Server handler 2 on default port 44951 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1095 for block blk_1073741834_1010 2024-11-20T23:38:02,416 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta after 1ms 2024-11-20T23:38:02,416 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39973:39973),(127.0.0.1/127.0.0.1:44775:44775)] 2024-11-20T23:38:02,416 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta is not closed yet, will try archiving it next time 2024-11-20T23:38:02,417 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/48f1430d22bd43f188598f8b51b6c316 to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/48f1430d22bd43f188598f8b51b6c316 2024-11-20T23:38:02,418 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/8004b3573f794a97bb34853eb1e8f6df to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/info/8004b3573f794a97bb34853eb1e8f6df 2024-11-20T23:38:02,419 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=412a5e44fd2e:37185 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-20T23:38:02,419 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a0f58ded84cf4678a84b6f37a7188e82=10347, eb906a1f180a45ef9ad3383f83e8e303=12506, 15fd40aa2918406888051ee19c194c2d=17994, 87cf9c50fa9a42b98253d1288aca3ef1=6027, 56824b1c39f04367ac40ac3f16e89dc2=6027, c9c86ce575054c0899209ade4b5a436e=18097, 080c19d626a2455599f51fbe5a4f8a0b=6027, 48f1430d22bd43f188598f8b51b6c316=8190, 8004b3573f794a97bb34853eb1e8f6df=14660] 2024-11-20T23:38:02,425 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0c5970216248783cd04249080d657bfa/recovered.edits/83.seqid, newMaxSeqId=83, maxSeqId=1 2024-11-20T23:38:02,425 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 2024-11-20T23:38:02,426 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0c5970216248783cd04249080d657bfa: Waiting for close lock at 1732145882401Running coprocessor pre-close hooks at 1732145882401Disabling compacts and flushes for region at 1732145882401Disabling writes for close at 1732145882401Writing region close event to WAL at 1732145882420 (+19 ms)Running coprocessor post-close hooks at 1732145882425 (+5 ms)Closed at 1732145882425 2024-11-20T23:38:02,426 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa. 2024-11-20T23:38:02,434 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/.tmp/info/aa62073cf0a1488b88c1246849cdbbe2 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732145843613.0c5970216248783cd04249080d657bfa./info:regioninfo/1732145843980/Put/seqid=0 2024-11-20T23:38:02,441 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741910_1096 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40329 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:02,441 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58428 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741910_1096] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data4]'}, localName='127.0.0.1:42179', datanodeUuid='1092f554-8ebb-4f08-bb75-2db829448461', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741910_1096 to mirror 127.0.0.1:40329 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:02,442 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741910_1096 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42179,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 2024-11-20T23:38:02,442 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741910_1096 2024-11-20T23:38:02,442 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58428 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741910_1096] {}] datanode.BlockReceiver(316): Block 1073741910 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:38:02,442 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58428 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741910_1096] {}] datanode.DataXceiver(331): 127.0.0.1:42179:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58428 dst: /127.0.0.1:42179 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:02,442 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:38:02,444 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741911_1097 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,444 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741911_1097 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:42179,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 2024-11-20T23:38:02,444 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741911_1097 2024-11-20T23:38:02,445 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:38:02,446 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741912_1098 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,446 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741912_1098 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK], DatanodeInfoWithStorage[127.0.0.1:42179,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 2024-11-20T23:38:02,447 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741912_1098 2024-11-20T23:38:02,447 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:38:02,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741913_1099 (size=7089) 2024-11-20T23:38:02,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741913_1099 (size=7089) 2024-11-20T23:38:02,455 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/.tmp/info/aa62073cf0a1488b88c1246849cdbbe2 2024-11-20T23:38:02,479 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/.tmp/ns/7385f0fd2f474673baf8b910ce092c49 is 43, key is default/ns:d/1732145843298/Put/seqid=0 2024-11-20T23:38:02,481 WARN [Thread-1070 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741914_1100 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,481 WARN [Thread-1070 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741914_1100 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK], DatanodeInfoWithStorage[127.0.0.1:42179,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 
2024-11-20T23:38:02,481 WARN [Thread-1070 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741914_1100 2024-11-20T23:38:02,482 WARN [Thread-1070 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:38:02,483 WARN [Thread-1070 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741915_1101 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,483 WARN [Thread-1070 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741915_1101 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:42179,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 2024-11-20T23:38:02,483 WARN [Thread-1070 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741915_1101 2024-11-20T23:38:02,484 WARN [Thread-1070 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:38:02,486 WARN [Thread-1070 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741916_1102 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:02,486 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58446 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741916_1102] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data4]'}, localName='127.0.0.1:42179', datanodeUuid='1092f554-8ebb-4f08-bb75-2db829448461', xmitsInProgress=0}:Exception transferring block BP-373955406-172.17.0.2-1732145839356:blk_1073741916_1102 to mirror 127.0.0.1:41443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:02,486 WARN [Thread-1070 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741916_1102 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42179,DS-d630948f-c72f-4cdf-9283-16cc93b6fd03,DISK], DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK]) is bad. 2024-11-20T23:38:02,486 WARN [Thread-1070 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741916_1102 2024-11-20T23:38:02,486 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58446 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741916_1102] {}] datanode.BlockReceiver(316): Block 1073741916 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T23:38:02,486 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_708865898_22 at /127.0.0.1:58446 [Receiving block BP-373955406-172.17.0.2-1732145839356:blk_1073741916_1102] {}] datanode.DataXceiver(331): 127.0.0.1:42179:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58446 dst: /127.0.0.1:42179 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:02,487 WARN [Thread-1070 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41443,DS-58ca4514-8f9b-4662-8dcc-ed1149af079f,DISK] 2024-11-20T23:38:02,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741917_1103 (size=5153) 2024-11-20T23:38:02,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741917_1103 (size=5153) 2024-11-20T23:38:02,492 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/.tmp/ns/7385f0fd2f474673baf8b910ce092c49 2024-11-20T23:38:02,514 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/.tmp/table/78e2a3ead4c5448bb4f744250eaa333f is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732145843993/Put/seqid=0 2024-11-20T23:38:02,516 WARN [Thread-1077 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741918_1104 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,516 WARN [Thread-1077 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741918_1104 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK], DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK]) is bad. 
2024-11-20T23:38:02,517 WARN [Thread-1077 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741918_1104 2024-11-20T23:38:02,517 WARN [Thread-1077 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40329,DS-ff4ec727-64f0-4c21-934e-5478f6335502,DISK] 2024-11-20T23:38:02,518 WARN [Thread-1077 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741919_1105 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:02,519 WARN [Thread-1077 {}] hdfs.DataStreamer(1731): Error Recovery for BP-373955406-172.17.0.2-1732145839356:blk_1073741919_1105 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK], DatanodeInfoWithStorage[127.0.0.1:38539,DS-9542a1fc-4f63-46e3-89a1-6bb68dfe1846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK]) is bad. 
2024-11-20T23:38:02,519 WARN [Thread-1077 {}] hdfs.DataStreamer(1850): Abandoning BP-373955406-172.17.0.2-1732145839356:blk_1073741919_1105 2024-11-20T23:38:02,519 WARN [Thread-1077 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38187,DS-2702dbc0-59cf-40e2-b408-e6cd01eebcc6,DISK] 2024-11-20T23:38:02,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741920_1106 (size=5424) 2024-11-20T23:38:02,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741920_1106 (size=5424) 2024-11-20T23:38:02,525 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/.tmp/table/78e2a3ead4c5448bb4f744250eaa333f 2024-11-20T23:38:02,532 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/.tmp/info/aa62073cf0a1488b88c1246849cdbbe2 as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/info/aa62073cf0a1488b88c1246849cdbbe2 2024-11-20T23:38:02,538 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/info/aa62073cf0a1488b88c1246849cdbbe2, entries=10, sequenceid=11, filesize=6.9 K 2024-11-20T23:38:02,539 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/.tmp/ns/7385f0fd2f474673baf8b910ce092c49 as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/ns/7385f0fd2f474673baf8b910ce092c49 2024-11-20T23:38:02,545 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/ns/7385f0fd2f474673baf8b910ce092c49, entries=2, sequenceid=11, filesize=5.0 K 2024-11-20T23:38:02,546 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/.tmp/table/78e2a3ead4c5448bb4f744250eaa333f as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/table/78e2a3ead4c5448bb4f744250eaa333f 2024-11-20T23:38:02,553 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/table/78e2a3ead4c5448bb4f744250eaa333f, entries=2, sequenceid=11, filesize=5.3 K 2024-11-20T23:38:02,555 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 153ms, sequenceid=11, compaction requested=false 2024-11-20T23:38:02,560 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-20T23:38:02,561 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:38:02,561 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T23:38:02,561 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732145882401Running coprocessor pre-close hooks at 1732145882401Disabling compacts and flushes for region at 1732145882401Disabling writes for close at 1732145882402 (+1 ms)Obtaining lock to block concurrent updates at 1732145882402Preparing flush snapshotting stores in 1588230740 at 1732145882402Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732145882402Flushing stores of hbase:meta,,1.1588230740 at 1732145882417 (+15 ms)Flushing 1588230740/info: creating writer at 1732145882417Flushing 1588230740/info: appending metadata at 1732145882434 (+17 ms)Flushing 1588230740/info: closing flushed file at 1732145882434Flushing 1588230740/ns: creating writer at 1732145882461 (+27 ms)Flushing 1588230740/ns: appending metadata at 1732145882478 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732145882478Flushing 1588230740/table: creating writer at 1732145882498 (+20 ms)Flushing 1588230740/table: appending metadata at 1732145882514 (+16 ms)Flushing 1588230740/table: closing flushed file at 1732145882514Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ed5e63f: reopening flushed file at 1732145882531 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3978a41e: reopening flushed file at 1732145882538 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@694dbdc2: reopening flushed file at 1732145882545 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 153ms, sequenceid=11, compaction requested=false at 1732145882555 (+10 ms)Writing region close event to WAL at 1732145882556 (+1 ms)Running coprocessor post-close hooks at 1732145882561 (+5 ms)Closed at 1732145882561 2024-11-20T23:38:02,561 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T23:38:02,581 INFO [regionserver/412a5e44fd2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T23:38:02,581 INFO [regionserver/412a5e44fd2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T23:38:02,589 INFO [regionserver/412a5e44fd2e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:38:02,602 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(976): 
stopping server 412a5e44fd2e,40151,1732145842087; all regions closed. 2024-11-20T23:38:02,604 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,604 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,604 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,605 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,605 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:02,606 INFO [regionserver/412a5e44fd2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T23:38:02,607 INFO [regionserver/412a5e44fd2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T23:38:02,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741909_1094 (size=825) 2024-11-20T23:38:02,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741909_1094 (size=825) 2024-11-20T23:38:02,960 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@33180a5d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38539, datanodeUuid=21c61af8-ae4a-44e9-b5e1-cd0bccadef25, infoPort=44775, infoSecurePort=0, ipcPort=39433, storageInfo=lv=-57;cid=testClusterID;nsid=1966669279;c=1732145839356):Failed to transfer BP-373955406-172.17.0.2-1732145839356:blk_1073741878_1061 to 127.0.0.1:38187 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:03,318 INFO [master/412a5e44fd2e:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T23:38:03,318 INFO [master/412a5e44fd2e:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-20T23:38:03,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741833_1020 (size=455) 2024-11-20T23:38:03,583 INFO [regionserver/412a5e44fd2e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:38:05,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741835_1011 (size=393) 2024-11-20T23:38:05,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:38:06,404 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 after 4002ms 2024-11-20T23:38:06,417 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta after 4002ms 2024-11-20T23:38:06,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:38:06,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:38:06,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-20T23:38:07,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:38:07,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T23:38:07,402 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-20T23:38:07,406 DEBUG [RS:1;412a5e44fd2e:32927 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/oldWALs 2024-11-20T23:38:07,406 INFO [RS:1;412a5e44fd2e:32927 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C32927%2C1732145843473:(num 1732145843714) 2024-11-20T23:38:07,406 DEBUG [RS:1;412a5e44fd2e:32927 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:07,406 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:38:07,406 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:38:07,407 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.ChoreService(370): Chore service for: regionserver/412a5e44fd2e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T23:38:07,407 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T23:38:07,407 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T23:38:07,407 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T23:38:07,407 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T23:38:07,407 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:38:07,407 INFO [RS:1;412a5e44fd2e:32927 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32927 2024-11-20T23:38:07,426 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,447 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,466 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,477 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1015a9c027a0002, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/412a5e44fd2e,32927,1732145843473 2024-11-20T23:38:07,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:38:07,478 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:38:07,488 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [412a5e44fd2e,32927,1732145843473] 2024-11-20T23:38:07,498 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/412a5e44fd2e,32927,1732145843473 already deleted, retry=false 2024-11-20T23:38:07,499 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 412a5e44fd2e,32927,1732145843473 expired; onlineServers=1 2024-11-20T23:38:07,588 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1015a9c027a0002, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:38:07,588 INFO [RS:1;412a5e44fd2e:32927 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:38:07,588 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1015a9c027a0002, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:38:07,588 INFO [RS:1;412a5e44fd2e:32927 {}] regionserver.HRegionServer(1031): Exiting; stopping=412a5e44fd2e,32927,1732145843473; zookeeper connection closed. 
2024-11-20T23:38:07,589 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7da6fd3f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7da6fd3f 2024-11-20T23:38:07,605 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-20T23:38:07,608 DEBUG [RS:0;412a5e44fd2e:40151 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/oldWALs 2024-11-20T23:38:07,608 INFO [RS:0;412a5e44fd2e:40151 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C40151%2C1732145842087.meta:.meta(num 1732145882403) 2024-11-20T23:38:07,609 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:07,609 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:07,609 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:07,609 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:07,609 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:07,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741895_1079 (size=16308) 2024-11-20T23:38:07,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741895_1079 (size=16308) 2024-11-20T23:38:07,615 DEBUG [RS:0;412a5e44fd2e:40151 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/oldWALs 2024-11-20T23:38:07,615 INFO [RS:0;412a5e44fd2e:40151 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C40151%2C1732145842087:(num 1732145881876) 2024-11-20T23:38:07,615 DEBUG [RS:0;412a5e44fd2e:40151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:07,615 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:38:07,615 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:38:07,616 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.ChoreService(370): Chore service for: regionserver/412a5e44fd2e:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-20T23:38:07,616 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:38:07,616 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T23:38:07,616 INFO [RS:0;412a5e44fd2e:40151 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40151 2024-11-20T23:38:07,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:38:07,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/412a5e44fd2e,40151,1732145842087 2024-11-20T23:38:07,625 INFO [RS:0;412a5e44fd2e:40151 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:38:07,626 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [412a5e44fd2e,40151,1732145842087] 2024-11-20T23:38:07,646 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/412a5e44fd2e,40151,1732145842087 already deleted, retry=false 2024-11-20T23:38:07,646 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 412a5e44fd2e,40151,1732145842087 expired; onlineServers=0 2024-11-20T23:38:07,646 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '412a5e44fd2e,37185,1732145841890' ***** 2024-11-20T23:38:07,646 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T23:38:07,646 INFO [M:0;412a5e44fd2e:37185 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:38:07,647 INFO [M:0;412a5e44fd2e:37185 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:38:07,647 DEBUG [M:0;412a5e44fd2e:37185 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T23:38:07,647 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T23:38:07,647 DEBUG [M:0;412a5e44fd2e:37185 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T23:38:07,647 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145842478 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145842478,5,FailOnTimeoutGroup] 2024-11-20T23:38:07,647 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145842478 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145842478,5,FailOnTimeoutGroup] 2024-11-20T23:38:07,647 INFO [M:0;412a5e44fd2e:37185 {}] hbase.ChoreService(370): Chore service for: master/412a5e44fd2e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T23:38:07,647 INFO [M:0;412a5e44fd2e:37185 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:38:07,648 DEBUG [M:0;412a5e44fd2e:37185 {}] master.HMaster(1795): Stopping service threads 2024-11-20T23:38:07,648 INFO [M:0;412a5e44fd2e:37185 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T23:38:07,648 INFO [M:0;412a5e44fd2e:37185 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:38:07,648 INFO [M:0;412a5e44fd2e:37185 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T23:38:07,648 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T23:38:07,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T23:38:07,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:07,656 DEBUG [M:0;412a5e44fd2e:37185 {}] zookeeper.ZKUtil(347): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T23:38:07,657 WARN [M:0;412a5e44fd2e:37185 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T23:38:07,657 INFO [M:0;412a5e44fd2e:37185 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/.lastflushedseqids 2024-11-20T23:38:07,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741921_1107 (size=130) 2024-11-20T23:38:07,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741921_1107 (size=130) 2024-11-20T23:38:07,670 INFO [M:0;412a5e44fd2e:37185 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T23:38:07,670 INFO [M:0;412a5e44fd2e:37185 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T23:38:07,670 DEBUG [M:0;412a5e44fd2e:37185 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:38:07,670 INFO [M:0;412a5e44fd2e:37185 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:38:07,670 DEBUG [M:0;412a5e44fd2e:37185 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:38:07,670 DEBUG [M:0;412a5e44fd2e:37185 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T23:38:07,670 DEBUG [M:0;412a5e44fd2e:37185 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:38:07,670 INFO [M:0;412a5e44fd2e:37185 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-20T23:38:07,685 DEBUG [M:0;412a5e44fd2e:37185 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ad8a1499d13041f38c9568fcb3c9a6f6 is 82, key is hbase:meta,,1/info:regioninfo/1732145843211/Put/seqid=0 2024-11-20T23:38:07,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741922_1108 (size=5672) 2024-11-20T23:38:07,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741922_1108 (size=5672) 2024-11-20T23:38:07,691 INFO [M:0;412a5e44fd2e:37185 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ad8a1499d13041f38c9568fcb3c9a6f6 2024-11-20T23:38:07,712 DEBUG [M:0;412a5e44fd2e:37185 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/33ed6ad09679493288a7f6f7d7498973 is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732145844001/Put/seqid=0 2024-11-20T23:38:07,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741923_1109 (size=6256) 2024-11-20T23:38:07,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741923_1109 (size=6256) 2024-11-20T23:38:07,720 INFO [M:0;412a5e44fd2e:37185 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/33ed6ad09679493288a7f6f7d7498973 2024-11-20T23:38:07,725 INFO [M:0;412a5e44fd2e:37185 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 33ed6ad09679493288a7f6f7d7498973 2024-11-20T23:38:07,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:38:07,736 INFO 
[RS:0;412a5e44fd2e:40151 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:38:07,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40151-0x1015a9c027a0001, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:38:07,736 INFO [RS:0;412a5e44fd2e:40151 {}] regionserver.HRegionServer(1031): Exiting; stopping=412a5e44fd2e,40151,1732145842087; zookeeper connection closed. 2024-11-20T23:38:07,736 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@77592999 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@77592999 2024-11-20T23:38:07,736 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-20T23:38:07,741 DEBUG [M:0;412a5e44fd2e:37185 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7268841077f641c092b9c4f0e7ddacbc is 69, key is 412a5e44fd2e,32927,1732145843473/rs:state/1732145843554/Put/seqid=0 2024-11-20T23:38:07,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741924_1110 (size=5224) 2024-11-20T23:38:07,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741924_1110 (size=5224) 2024-11-20T23:38:07,748 INFO [M:0;412a5e44fd2e:37185 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7268841077f641c092b9c4f0e7ddacbc 2024-11-20T23:38:07,771 DEBUG [M:0;412a5e44fd2e:37185 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b166da9c952c4dac851316b4bf2d773e is 52, key is load_balancer_on/state:d/1732145843427/Put/seqid=0 2024-11-20T23:38:07,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741925_1111 (size=5056) 2024-11-20T23:38:07,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741925_1111 (size=5056) 2024-11-20T23:38:07,777 INFO [M:0;412a5e44fd2e:37185 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b166da9c952c4dac851316b4bf2d773e 2024-11-20T23:38:07,784 DEBUG [M:0;412a5e44fd2e:37185 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ad8a1499d13041f38c9568fcb3c9a6f6 as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ad8a1499d13041f38c9568fcb3c9a6f6 2024-11-20T23:38:07,790 INFO [M:0;412a5e44fd2e:37185 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ad8a1499d13041f38c9568fcb3c9a6f6, entries=8, sequenceid=60, filesize=5.5 K 2024-11-20T23:38:07,791 DEBUG [M:0;412a5e44fd2e:37185 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/33ed6ad09679493288a7f6f7d7498973 as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/33ed6ad09679493288a7f6f7d7498973 2024-11-20T23:38:07,799 INFO [M:0;412a5e44fd2e:37185 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 33ed6ad09679493288a7f6f7d7498973 2024-11-20T23:38:07,799 INFO [M:0;412a5e44fd2e:37185 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/33ed6ad09679493288a7f6f7d7498973, entries=6, sequenceid=60, filesize=6.1 K 2024-11-20T23:38:07,801 DEBUG [M:0;412a5e44fd2e:37185 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7268841077f641c092b9c4f0e7ddacbc as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7268841077f641c092b9c4f0e7ddacbc 2024-11-20T23:38:07,814 INFO [M:0;412a5e44fd2e:37185 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7268841077f641c092b9c4f0e7ddacbc, entries=2, sequenceid=60, filesize=5.1 K 2024-11-20T23:38:07,815 DEBUG [M:0;412a5e44fd2e:37185 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b166da9c952c4dac851316b4bf2d773e as hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b166da9c952c4dac851316b4bf2d773e 2024-11-20T23:38:07,821 INFO [M:0;412a5e44fd2e:37185 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b166da9c952c4dac851316b4bf2d773e, entries=1, sequenceid=60, filesize=4.9 K 2024-11-20T23:38:07,822 INFO [M:0;412a5e44fd2e:37185 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=60, compaction requested=false 2024-11-20T23:38:07,828 INFO [M:0;412a5e44fd2e:37185 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T23:38:07,828 DEBUG [M:0;412a5e44fd2e:37185 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732145887670Disabling compacts and flushes for region at 1732145887670Disabling writes for close at 1732145887670Obtaining lock to block concurrent updates at 1732145887670Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732145887670Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1732145887671 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732145887671Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732145887671Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732145887685 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732145887685Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732145887696 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732145887711 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732145887711Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732145887725 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732145887741 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732145887741Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732145887754 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732145887770 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732145887770Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1172aea9: reopening flushed file at 1732145887783 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e40c19e: reopening flushed file at 1732145887790 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@145a3fe: reopening flushed file at 1732145887799 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1033609f: reopening flushed file at 1732145887814 (+15 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=60, compaction requested=false at 1732145887822 (+8 ms)Writing region close event to WAL at 1732145887828 (+6 ms)Closed at 1732145887828 2024-11-20T23:38:07,831 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:07,831 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:07,831 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:07,831 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:07,831 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:07,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741891_1074 (size=1045) 2024-11-20T23:38:07,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42179 is added to blk_1073741891_1074 (size=1045) 2024-11-20T23:38:07,968 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T23:38:07,985 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:07,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:08,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:08,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:08,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741836_1012 (size=76) 2024-11-20T23:38:08,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:38:09,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:09,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:09,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:38:09,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38539 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:38:09,594 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@31c53b8d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-373955406-172.17.0.2-1732145839356:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:38187,null,null]) java.net.ConnectException: Call From 412a5e44fd2e/172.17.0.2 to localhost:44903 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-20T23:38:10,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:10,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:10,503 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/WALs/412a5e44fd2e,37185,1732145841890/412a5e44fd2e%2C37185%2C1732145841890.1732145842246 to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/oldWALs/412a5e44fd2e%2C37185%2C1732145841890.1732145842246 2024-11-20T23:38:10,506 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/MasterData/oldWALs/412a5e44fd2e%2C37185%2C1732145841890.1732145842246 to hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/oldWALs/412a5e44fd2e%2C37185%2C1732145841890.1732145842246$masterlocalwal$ 2024-11-20T23:38:10,507 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T23:38:10,507 INFO [M:0;412a5e44fd2e:37185 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-20T23:38:10,507 INFO [M:0;412a5e44fd2e:37185 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37185 2024-11-20T23:38:10,507 INFO [M:0;412a5e44fd2e:37185 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:38:10,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:38:10,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37185-0x1015a9c027a0000, quorum=127.0.0.1:51729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:38:10,662 INFO [M:0;412a5e44fd2e:37185 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:38:10,664 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6219e1b8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:38:10,665 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e00c8cb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:38:10,665 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:38:10,665 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@441dcfc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:38:10,665 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16f8dfc7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,STOPPED} 2024-11-20T23:38:10,666 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T23:38:10,666 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:38:10,666 WARN [BP-373955406-172.17.0.2-1732145839356 heartbeating to localhost/127.0.0.1:44951 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:38:10,666 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@761819d8 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-373955406-172.17.0.2-1732145839356:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:38187,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:44903 , LocalHost:localPort 412a5e44fd2e/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?]
... 12 more
2024-11-20T23:38:10,666 WARN [BP-373955406-172.17.0.2-1732145839356 heartbeating to localhost/127.0.0.1:44951 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-373955406-172.17.0.2-1732145839356 (Datanode Uuid 1092f554-8ebb-4f08-bb75-2db829448461) service to localhost/127.0.0.1:44951
2024-11-20T23:38:10,667 ERROR [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@761819d8 {}] datanode.DataNode(1743): Cannot find BPOfferService for reporting block received for bpid=BP-373955406-172.17.0.2-1732145839356
2024-11-20T23:38:10,667 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data3/current/BP-373955406-172.17.0.2-1732145839356 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T23:38:10,668 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data4/current/BP-373955406-172.17.0.2-1732145839356 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T23:38:10,668 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T23:38:10,668 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@761819d8 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-373955406-172.17.0.2-1732145839356:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:38187,null,null])
java.io.IOException: No block pool offer service for bpid=BP-373955406-172.17.0.2-1732145839356
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T23:38:10,668 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@761819d8 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-373955406-172.17.0.2-1732145839356:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:42179,null,null])
java.io.IOException: No block pool offer service for bpid=BP-373955406-172.17.0.2-1732145839356
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T23:38:10,668 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@761819d8 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-373955406-172.17.0.2-1732145839356:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:38187,null,null], DatanodeInfoWithStorage[127.0.0.1:42179,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-373955406-172.17.0.2-1732145839356:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:38187,null,null], DatanodeInfoWithStorage[127.0.0.1:42179,null,null]]
2024-11-20T23:38:10,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@463983fb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T23:38:10,674 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@57fef5ae{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T23:38:10,674 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T23:38:10,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b21f544{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T23:38:10,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7524e7e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,STOPPED}
2024-11-20T23:38:10,677 WARN [BP-373955406-172.17.0.2-1732145839356 heartbeating to localhost/127.0.0.1:44951 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T23:38:10,677 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T23:38:10,677 WARN [BP-373955406-172.17.0.2-1732145839356 heartbeating to localhost/127.0.0.1:44951 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-373955406-172.17.0.2-1732145839356 (Datanode Uuid 21c61af8-ae4a-44e9-b5e1-cd0bccadef25) service to localhost/127.0.0.1:44951
2024-11-20T23:38:10,677 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T23:38:10,677 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data9/current/BP-373955406-172.17.0.2-1732145839356 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T23:38:10,677 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/cluster_36db01ee-b121-8739-4634-3caa4a338428/data/data10/current/BP-373955406-172.17.0.2-1732145839356 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T23:38:10,678 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T23:38:10,683 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2295376c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T23:38:10,684 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54adbc26{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T23:38:10,684 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T23:38:10,684 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ac253d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T23:38:10,684 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ab5393f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir/,STOPPED}
2024-11-20T23:38:10,692 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-20T23:38:10,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-20T23:38:10,738 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=156 (was 81)
Potentially hanging thread: HMaster-EventLoopGroup-5-1
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native 
Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44951 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:44951 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44951 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44951 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44951 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44951 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:44951 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44951 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44951 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34499 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fe668bf5390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:34499 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44951 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fe668bf5390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44951 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=319 (was 280) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=264 (was 1149) 2024-11-20T23:38:10,745 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=156, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=319, ProcessCount=11, AvailableMemoryMB=264 2024-11-20T23:38:10,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T23:38:10,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.log.dir so I do NOT create it in target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9 2024-11-20T23:38:10,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f01bf5c8-ff83-be66-796e-cd02b03ffdf0/hadoop.tmp.dir so I do NOT create it in target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9 2024-11-20T23:38:10,746 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c, deleteOnExit=true 2024-11-20T23:38:10,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T23:38:10,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/test.cache.data in system properties and HBase conf 2024-11-20T23:38:10,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T23:38:10,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir in system properties and HBase conf 2024-11-20T23:38:10,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T23:38:10,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T23:38:10,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T23:38:10,746 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-20T23:38:10,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:38:10,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:38:10,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T23:38:10,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:38:10,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T23:38:10,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T23:38:10,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:38:10,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:38:10,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T23:38:10,747 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/nfs.dump.dir in system properties and HBase conf 2024-11-20T23:38:10,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/java.io.tmpdir in system properties and HBase conf 2024-11-20T23:38:10,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:38:10,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T23:38:10,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T23:38:10,760 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:38:11,134 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:38:11,143 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:38:11,145 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:38:11,145 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:38:11,145 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:38:11,146 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
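[Editor's note: illustrative aside, not part of the captured log.] The HBaseTestingUtil(751) records above show the harness pointing a long list of Hadoop/HDFS/YARN path keys (hadoop.tmp.dir, dfs.journalnode.edits.dir, nfs.dump.dir, java.io.tmpdir, ...) at sub-directories of the per-test target/test-data directory, recording each as "Setting <key> to <dir> in system properties and HBase conf". A minimal sketch of that pattern follows; the helper name and key list are illustrative assumptions, not the HBase test API.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.hadoop.conf.Configuration;

public final class TestDirSetup {
  /**
   * Point one configuration key at a sub-directory of the per-test data dir,
   * publishing it both as a JVM system property and in the Hadoop/HBase conf,
   * mirroring the "Setting <key> to <dir> in system properties and HBase conf"
   * lines in the log. Illustrative only.
   */
  static void setTestDir(Configuration conf, Path testDataDir, String key) throws IOException {
    Path dir = testDataDir.resolve(key);        // e.g. .../test-data/<uuid>/hadoop.tmp.dir
    Files.createDirectories(dir);
    System.setProperty(key, dir.toString());    // for code that reads system properties
    conf.set(key, dir.toString());              // for code that reads the configuration
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path testDataDir = Files.createTempDirectory("test-data");
    // Hypothetical subset of the keys listed in the log above.
    for (String key : new String[] {"hadoop.tmp.dir", "hadoop.log.dir", "dfs.journalnode.edits.dir"}) {
      setTestDir(conf, testDataDir, key);
    }
  }
}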
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:38:11,146 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45628471{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:38:11,147 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bf7054a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:38:11,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@71718145{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/java.io.tmpdir/jetty-localhost-44999-hadoop-hdfs-3_4_1-tests_jar-_-any-10089690396466363437/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T23:38:11,258 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37ba1ac4{HTTP/1.1, (http/1.1)}{localhost:44999} 2024-11-20T23:38:11,258 INFO [Time-limited test {}] server.Server(415): Started @161458ms 2024-11-20T23:38:11,273 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:38:11,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:11,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:11,565 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
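[Editor's note: illustrative aside, not part of the captured log.] The repeated RecoverLeaseFSUtils(258) "Failed invocation" warnings above come from a reflective isFileClosed probe: the method is called via Method.invoke, so the underlying "Filesystem closed" IOException surfaces wrapped in an InvocationTargetException, which is exactly the shape of the stack traces logged here. A minimal sketch of that reflective pattern, assuming a FileSystem that may or may not expose isFileClosed(Path); this is an illustration of the pattern implied by the trace, not the HBase implementation itself.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  /**
   * Ask the filesystem whether a file has been closed, via reflection so the
   * code also works against FileSystem implementations without isFileClosed().
   * Returns false when the method is missing or the call fails, matching the
   * warn-and-keep-retrying behaviour visible in the log above.
   */
  static boolean isFileClosed(FileSystem fs, Path p) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (NoSuchMethodException e) {
      return false;                           // this filesystem has no such probe
    } catch (InvocationTargetException e) {
      // The real cause (e.g. "Filesystem closed") is the wrapped exception,
      // which is why the log prints "InvocationTargetException: null" first
      // and the IOException only under "Caused by:".
      System.err.println("Failed invocation for " + p + ": " + e.getCause());
      return false;
    } catch (IllegalAccessException e) {
      return false;
    }
  }
}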
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:38:11,568 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:38:11,576 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:38:11,576 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:38:11,577 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T23:38:11,577 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30a928dc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:38:11,578 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@719d6bc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:38:11,700 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@16178224{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/java.io.tmpdir/jetty-localhost-39847-hadoop-hdfs-3_4_1-tests_jar-_-any-3501132670693878263/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:38:11,701 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34c1099f{HTTP/1.1, (http/1.1)}{localhost:39847} 2024-11-20T23:38:11,702 INFO [Time-limited test {}] server.Server(415): Started @161901ms 2024-11-20T23:38:11,703 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:38:11,732 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
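[Editor's note: illustrative aside, not part of the captured log.] Each NameNode/DataNode in the mini-cluster starts its own embedded Jetty server, registering the /logs and /static context handlers and a webapp context, as the ContextHandler(921) and ServerConnector(333) lines above show. A rough Jetty 9 sketch of serving a log directory on an ephemeral localhost port in the same spirit; the paths and port here are placeholders, not the test's actual setup.

import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.handler.ContextHandler;
import org.eclipse.jetty.server.handler.ResourceHandler;

public final class MiniLogsServer {
  public static void main(String[] args) throws Exception {
    Server server = new Server();

    // Ephemeral localhost port, like "Started ServerConnector@...{localhost:46071}" above.
    ServerConnector connector = new ServerConnector(server);
    connector.setHost("localhost");
    connector.setPort(0);
    server.addConnector(connector);

    // Serve the hadoop.log.dir contents under /logs, mirroring the {logs,/logs,...} handler.
    ResourceHandler logs = new ResourceHandler();
    logs.setDirectoriesListed(true);
    logs.setResourceBase(System.getProperty("hadoop.log.dir", "/tmp"));  // placeholder path
    ContextHandler logsContext = new ContextHandler("/logs");
    logsContext.setHandler(logs);

    server.setHandler(logsContext);
    server.start();
    System.out.println("Started on port " + connector.getLocalPort());
    server.join();
  }
}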
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:38:11,737 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:38:11,738 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:38:11,738 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:38:11,738 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T23:38:11,739 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8825f29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:38:11,739 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@718cd5f1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:38:11,847 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d10ba6a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/java.io.tmpdir/jetty-localhost-46071-hadoop-hdfs-3_4_1-tests_jar-_-any-7510841782504798145/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:38:11,848 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b91ed3f{HTTP/1.1, (http/1.1)}{localhost:46071} 2024-11-20T23:38:11,848 INFO [Time-limited test {}] server.Server(415): Started @162048ms 2024-11-20T23:38:11,849 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:38:12,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:12,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:13,325 WARN [Thread-1220 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data1/current/BP-1780958559-172.17.0.2-1732145890772/current, will proceed with Du for space computation calculation, 2024-11-20T23:38:13,325 WARN [Thread-1221 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data2/current/BP-1780958559-172.17.0.2-1732145890772/current, will proceed with Du for space computation calculation, 2024-11-20T23:38:13,344 WARN [Thread-1184 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T23:38:13,347 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x47e2e1584d05531c with lease ID 0x368253c9e5e88a31: Processing first storage report for DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec from datanode DatanodeRegistration(127.0.0.1:38013, datanodeUuid=09b8d872-1fe7-45a4-9f26-fbd4a3e235af, infoPort=38727, infoSecurePort=0, ipcPort=36829, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772) 2024-11-20T23:38:13,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x47e2e1584d05531c with lease ID 0x368253c9e5e88a31: from storage DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec node DatanodeRegistration(127.0.0.1:38013, datanodeUuid=09b8d872-1fe7-45a4-9f26-fbd4a3e235af, infoPort=38727, infoSecurePort=0, ipcPort=36829, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:38:13,347 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x47e2e1584d05531c with lease ID 0x368253c9e5e88a31: Processing first storage report for DS-3cc52cd6-a12d-4bb7-be9f-f014465f45f5 from datanode DatanodeRegistration(127.0.0.1:38013, datanodeUuid=09b8d872-1fe7-45a4-9f26-fbd4a3e235af, infoPort=38727, infoSecurePort=0, ipcPort=36829, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772) 2024-11-20T23:38:13,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x47e2e1584d05531c with lease ID 0x368253c9e5e88a31: from storage DS-3cc52cd6-a12d-4bb7-be9f-f014465f45f5 node DatanodeRegistration(127.0.0.1:38013, datanodeUuid=09b8d872-1fe7-45a4-9f26-fbd4a3e235af, infoPort=38727, infoSecurePort=0, ipcPort=36829, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:38:13,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:13,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:13,465 WARN [Thread-1232 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data4/current/BP-1780958559-172.17.0.2-1732145890772/current, will proceed with Du for space computation calculation, 2024-11-20T23:38:13,465 WARN [Thread-1231 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data3/current/BP-1780958559-172.17.0.2-1732145890772/current, will proceed with Du for space computation calculation, 2024-11-20T23:38:13,483 WARN [Thread-1207 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T23:38:13,485 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf9fef457f892c53e with lease ID 0x368253c9e5e88a32: Processing first storage report for DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5 from datanode DatanodeRegistration(127.0.0.1:42623, datanodeUuid=a90ad449-2e14-4fae-ac44-0475925d20ac, infoPort=39701, infoSecurePort=0, ipcPort=42391, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772) 2024-11-20T23:38:13,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf9fef457f892c53e with lease ID 0x368253c9e5e88a32: from storage DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5 node DatanodeRegistration(127.0.0.1:42623, datanodeUuid=a90ad449-2e14-4fae-ac44-0475925d20ac, infoPort=39701, infoSecurePort=0, ipcPort=42391, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:38:13,485 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf9fef457f892c53e with lease ID 0x368253c9e5e88a32: Processing first storage report for DS-848fe04f-f36b-463c-9a5f-91cba0dd9880 from datanode DatanodeRegistration(127.0.0.1:42623, datanodeUuid=a90ad449-2e14-4fae-ac44-0475925d20ac, infoPort=39701, infoSecurePort=0, ipcPort=42391, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772) 2024-11-20T23:38:13,486 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf9fef457f892c53e with lease ID 0x368253c9e5e88a32: from storage DS-848fe04f-f36b-463c-9a5f-91cba0dd9880 node DatanodeRegistration(127.0.0.1:42623, datanodeUuid=a90ad449-2e14-4fae-ac44-0475925d20ac, infoPort=39701, infoSecurePort=0, ipcPort=42391, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:38:13,589 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9 2024-11-20T23:38:13,593 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/zookeeper_0, clientPort=59058, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T23:38:13,594 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59058 2024-11-20T23:38:13,594 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:13,596 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:13,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42623 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:38:13,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:38:13,620 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d with version=8 2024-11-20T23:38:13,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/hbase-staging 2024-11-20T23:38:13,622 INFO [Time-limited test {}] client.ConnectionUtils(128): master/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:38:13,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:38:13,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:38:13,622 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:38:13,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:38:13,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:38:13,623 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T23:38:13,623 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:38:13,623 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39051 2024-11-20T23:38:13,625 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39051 connecting to ZooKeeper ensemble=127.0.0.1:59058 2024-11-20T23:38:13,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:390510x0, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:38:13,682 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39051-0x1015a9ccc830000 connected 2024-11-20T23:38:13,767 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:13,769 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:13,771 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:38:13,772 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d, hbase.cluster.distributed=false 2024-11-20T23:38:13,774 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:38:13,775 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39051 2024-11-20T23:38:13,775 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39051 2024-11-20T23:38:13,776 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39051 2024-11-20T23:38:13,776 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39051 2024-11-20T23:38:13,776 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39051 2024-11-20T23:38:13,800 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:38:13,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:38:13,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:38:13,800 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:38:13,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:38:13,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:38:13,800 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T23:38:13,800 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:38:13,801 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34697 2024-11-20T23:38:13,803 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34697 connecting to ZooKeeper ensemble=127.0.0.1:59058 2024-11-20T23:38:13,804 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:13,806 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:13,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:346970x0, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:38:13,820 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34697-0x1015a9ccc830001 connected 2024-11-20T23:38:13,820 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:38:13,820 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T23:38:13,821 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T23:38:13,821 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T23:38:13,822 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:38:13,822 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34697 2024-11-20T23:38:13,823 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34697 2024-11-20T23:38:13,823 DEBUG [Time-limited 
test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34697 2024-11-20T23:38:13,823 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34697 2024-11-20T23:38:13,823 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34697 2024-11-20T23:38:13,836 DEBUG [M:0;412a5e44fd2e:39051 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;412a5e44fd2e:39051 2024-11-20T23:38:13,836 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/412a5e44fd2e,39051,1732145893622 2024-11-20T23:38:13,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:38:13,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:38:13,846 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/412a5e44fd2e,39051,1732145893622 2024-11-20T23:38:13,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T23:38:13,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:13,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:13,857 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T23:38:13,857 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/412a5e44fd2e,39051,1732145893622 from backup master directory 2024-11-20T23:38:13,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/412a5e44fd2e,39051,1732145893622 2024-11-20T23:38:13,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:38:13,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:38:13,866 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T23:38:13,867 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=412a5e44fd2e,39051,1732145893622 2024-11-20T23:38:13,871 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/hbase.id] with ID: dc35a5de-0671-4d66-8128-ae5214d5eca5 2024-11-20T23:38:13,872 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/.tmp/hbase.id 2024-11-20T23:38:13,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:38:13,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42623 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:38:13,879 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/.tmp/hbase.id]:[hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/hbase.id] 2024-11-20T23:38:13,894 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:13,894 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T23:38:13,896 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
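The repeated "Failed invocation ... InvocationTargetException ... Filesystem closed" warnings earlier in this capture (util.RecoverLeaseFSUtils(258)) come from lease recovery probing whether the old WAL file is already closed: the trace shows RecoverLeaseFSUtils calling DistributedFileSystem.isFileClosed through Method.invoke, and the call only fails here because the test's DFSClient has already been shut down. A minimal sketch of that kind of reflective probe, assuming an HDFS-backed FileSystem and using only the public isFileClosed(Path) API (this is an illustration, not the actual RecoverLeaseFSUtils code):

    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {
      private IsFileClosedProbe() {}

      // Reflectively asks the FileSystem whether the file's lease is already closed,
      // mirroring the probe logged above. Returns false if the method is absent or
      // the invocation fails (for example with "Filesystem closed").
      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (ReflectiveOperationException e) {
          // An InvocationTargetException here wraps IOExceptions like the one in the trace.
          return false;
        }
      }
    }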
2024-11-20T23:38:13,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:13,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:13,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42623 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:38:13,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:38:13,918 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T23:38:13,919 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T23:38:13,919 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:38:13,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:38:13,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42623 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:38:13,929 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store 2024-11-20T23:38:13,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42623 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:38:13,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:38:13,939 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:38:13,939 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:38:13,939 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:38:13,939 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:38:13,939 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T23:38:13,939 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:38:13,939 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
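The 'master:store' descriptor echoed above is an ordinary TableDescriptor with four column families and per-family settings (versions, encoding, bloom type, block size). For reference, the same kind of settings can be expressed with the public builder API; this is only a hedged sketch using a placeholder table name and two of the families, not how the master local region itself is bootstrapped:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class StoreDescriptorSketch {
      private StoreDescriptorSketch() {}

      static TableDescriptor build() {
        // 'info' mirrors the settings logged above: 3 versions, ROW_INDEX_V1 encoding,
        // ROWCOL bloom filter, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();
        // 'proc' keeps the defaults shown in the log: 1 version, ROW bloom, 64 KB blocks.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();
        // "demo:store" is a placeholder for this sketch, not the internal master:store table.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }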
2024-11-20T23:38:13,939 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732145893939Disabling compacts and flushes for region at 1732145893939Disabling writes for close at 1732145893939Writing region close event to WAL at 1732145893939Closed at 1732145893939 2024-11-20T23:38:13,940 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/.initializing 2024-11-20T23:38:13,940 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/WALs/412a5e44fd2e,39051,1732145893622 2024-11-20T23:38:13,943 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C39051%2C1732145893622, suffix=, logDir=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/WALs/412a5e44fd2e,39051,1732145893622, archiveDir=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/oldWALs, maxLogs=10 2024-11-20T23:38:13,943 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C39051%2C1732145893622.1732145893943 2024-11-20T23:38:13,948 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/WALs/412a5e44fd2e,39051,1732145893622/412a5e44fd2e%2C39051%2C1732145893622.1732145893943 2024-11-20T23:38:13,952 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38727:38727),(127.0.0.1/127.0.0.1:39701:39701)] 2024-11-20T23:38:13,960 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:38:13,960 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:38:13,960 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:13,960 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:13,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:13,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T23:38:13,966 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:13,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:13,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:13,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T23:38:13,968 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:13,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:38:13,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:13,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T23:38:13,970 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:13,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:38:13,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:13,972 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T23:38:13,972 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:13,973 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:38:13,973 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:13,974 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:13,974 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:13,975 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:13,975 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:13,976 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T23:38:13,977 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:13,979 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:38:13,979 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792113, jitterRate=0.0072238147258758545}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T23:38:13,981 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732145893960Initializing all the Stores at 1732145893961 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145893961Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145893964 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145893964Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145893964Cleaning up temporary data from old regions at 1732145893975 (+11 ms)Region opened successfully at 1732145893980 (+5 ms) 2024-11-20T23:38:13,981 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T23:38:13,985 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@243a5a89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:38:13,986 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T23:38:13,986 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T23:38:13,986 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T23:38:13,986 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T23:38:13,987 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T23:38:13,987 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T23:38:13,987 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T23:38:13,990 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T23:38:13,992 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T23:38:14,003 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T23:38:14,004 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T23:38:14,005 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T23:38:14,014 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T23:38:14,014 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T23:38:14,016 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T23:38:14,024 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T23:38:14,026 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T23:38:14,035 DEBUG 
[master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T23:38:14,037 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T23:38:14,045 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T23:38:14,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:38:14,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:14,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:38:14,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:14,056 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=412a5e44fd2e,39051,1732145893622, sessionid=0x1015a9ccc830000, setting cluster-up flag (Was=false) 2024-11-20T23:38:14,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:14,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:14,109 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T23:38:14,110 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,39051,1732145893622 2024-11-20T23:38:14,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:14,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:14,161 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T23:38:14,162 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,39051,1732145893622 2024-11-20T23:38:14,164 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T23:38:14,165 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T23:38:14,166 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T23:38:14,166 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T23:38:14,166 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 412a5e44fd2e,39051,1732145893622 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T23:38:14,167 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:38:14,167 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:38:14,167 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:38:14,167 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:38:14,168 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/412a5e44fd2e:0, corePoolSize=10, maxPoolSize=10 2024-11-20T23:38:14,168 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:14,168 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:38:14,168 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T23:38:14,169 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732145924169 2024-11-20T23:38:14,169 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T23:38:14,170 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T23:38:14,170 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T23:38:14,170 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T23:38:14,170 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T23:38:14,170 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T23:38:14,170 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:14,170 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:38:14,170 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T23:38:14,171 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T23:38:14,171 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T23:38:14,171 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T23:38:14,171 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T23:38:14,172 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T23:38:14,172 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145894172,5,FailOnTimeoutGroup] 2024-11-20T23:38:14,172 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:14,172 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145894172,5,FailOnTimeoutGroup] 2024-11-20T23:38:14,172 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
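Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line above corresponds to a ScheduledChore registered on the server's ChoreService. A minimal sketch of that pattern, with a hypothetical chore name and a no-op stopper (illustrative only, not one of the chores listed above):

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public final class ChoreSketch {
      // A hypothetical chore that runs every 60 seconds until its stopper says stop.
      static final class HeartbeatChore extends ScheduledChore {
        HeartbeatChore(Stoppable stopper) {
          super("HeartbeatChore", stopper, 60_000);
        }

        @Override
        protected void chore() {
          // Periodic work goes here; real chores log lines like the ones above when scheduled.
          System.out.println("heartbeat");
        }
      }

      public static void main(String[] args) {
        Stoppable neverStop = new Stoppable() {
          @Override public void stop(String why) {}
          @Override public boolean isStopped() { return false; }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(new HeartbeatChore(neverStop));
        // A real caller would eventually call service.shutdown() to release the pool.
      }
    }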
2024-11-20T23:38:14,172 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T23:38:14,172 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:14,172 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:14,172 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T23:38:14,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:38:14,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42623 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:38:14,180 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T23:38:14,181 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d 2024-11-20T23:38:14,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42623 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:38:14,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:38:14,225 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(746): ClusterId : dc35a5de-0671-4d66-8128-ae5214d5eca5 2024-11-20T23:38:14,226 DEBUG [RS:0;412a5e44fd2e:34697 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T23:38:14,236 DEBUG [RS:0;412a5e44fd2e:34697 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T23:38:14,236 DEBUG [RS:0;412a5e44fd2e:34697 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T23:38:14,247 DEBUG [RS:0;412a5e44fd2e:34697 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T23:38:14,247 DEBUG [RS:0;412a5e44fd2e:34697 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21ad8a89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:38:14,265 DEBUG [RS:0;412a5e44fd2e:34697 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;412a5e44fd2e:34697 2024-11-20T23:38:14,265 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T23:38:14,265 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T23:38:14,265 DEBUG [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-20T23:38:14,266 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(2659): reportForDuty to master=412a5e44fd2e,39051,1732145893622 with port=34697, startcode=1732145893799 2024-11-20T23:38:14,266 DEBUG [RS:0;412a5e44fd2e:34697 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T23:38:14,268 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42285, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T23:38:14,269 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39051 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 412a5e44fd2e,34697,1732145893799 2024-11-20T23:38:14,269 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39051 {}] master.ServerManager(517): Registering regionserver=412a5e44fd2e,34697,1732145893799 2024-11-20T23:38:14,271 DEBUG [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d 2024-11-20T23:38:14,271 DEBUG [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46843 2024-11-20T23:38:14,271 DEBUG [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T23:38:14,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:38:14,282 DEBUG [RS:0;412a5e44fd2e:34697 {}] zookeeper.ZKUtil(111): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/412a5e44fd2e,34697,1732145893799 2024-11-20T23:38:14,282 WARN [RS:0;412a5e44fd2e:34697 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T23:38:14,283 INFO [RS:0;412a5e44fd2e:34697 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:38:14,283 DEBUG [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799 2024-11-20T23:38:14,283 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [412a5e44fd2e,34697,1732145893799] 2024-11-20T23:38:14,287 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T23:38:14,289 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T23:38:14,289 INFO [RS:0;412a5e44fd2e:34697 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T23:38:14,289 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
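The MemStoreFlusher limits (globalMemStoreLimit=880 M, low mark 836 M) and the PressureAwareCompactionThroughputController bounds (100 MB/s upper, 50 MB/s lower) reported above come from configuration rather than being hard-coded. A hedged sketch of how they are commonly tuned; the property names below are the ones these components are usually documented with and should be verified against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndCompactionTuningSketch {
        public static Configuration tuned() {
            Configuration conf = HBaseConfiguration.create();
            // Fraction of the region server heap usable by all memstores; the
            // 880 M / 836 M figures above are the resulting absolute limits.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            // Bounds applied by PressureAwareCompactionThroughputController,
            // matching the 100 MB/s upper and 50 MB/s lower bounds in the log.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            return conf;
        }
    }
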
2024-11-20T23:38:14,289 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T23:38:14,290 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T23:38:14,290 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:38:14,291 DEBUG [RS:0;412a5e44fd2e:34697 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:38:14,292 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
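Each executor entry above declares a corePoolSize and maxPoolSize; in plain JDK terms that is the shape of a bounded ThreadPoolExecutor. A generic illustration only, not HBase's internal ExecutorService class:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class FixedPoolSketch {
        public static ThreadPoolExecutor openRegionPool() {
            // corePoolSize=1, maxPoolSize=1, analogous to RS_OPEN_REGION above:
            // a single worker thread draining a queue of open-region tasks.
            return new ThreadPoolExecutor(
                1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        }
    }
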
2024-11-20T23:38:14,292 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:14,292 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:14,292 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:14,292 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:14,292 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,34697,1732145893799-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:38:14,309 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T23:38:14,309 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,34697,1732145893799-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:14,309 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:14,310 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.Replication(171): 412a5e44fd2e,34697,1732145893799 started 2024-11-20T23:38:14,326 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:14,326 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(1482): Serving as 412a5e44fd2e,34697,1732145893799, RpcServer on 412a5e44fd2e/172.17.0.2:34697, sessionid=0x1015a9ccc830001 2024-11-20T23:38:14,326 DEBUG [RS:0;412a5e44fd2e:34697 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T23:38:14,326 DEBUG [RS:0;412a5e44fd2e:34697 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 412a5e44fd2e,34697,1732145893799 2024-11-20T23:38:14,326 DEBUG [RS:0;412a5e44fd2e:34697 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,34697,1732145893799' 2024-11-20T23:38:14,326 DEBUG [RS:0;412a5e44fd2e:34697 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T23:38:14,327 DEBUG [RS:0;412a5e44fd2e:34697 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T23:38:14,327 DEBUG [RS:0;412a5e44fd2e:34697 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T23:38:14,327 DEBUG [RS:0;412a5e44fd2e:34697 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T23:38:14,327 DEBUG [RS:0;412a5e44fd2e:34697 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 412a5e44fd2e,34697,1732145893799 2024-11-20T23:38:14,327 DEBUG [RS:0;412a5e44fd2e:34697 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,34697,1732145893799' 2024-11-20T23:38:14,327 DEBUG [RS:0;412a5e44fd2e:34697 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T23:38:14,327 DEBUG 
[RS:0;412a5e44fd2e:34697 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T23:38:14,328 DEBUG [RS:0;412a5e44fd2e:34697 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T23:38:14,328 INFO [RS:0;412a5e44fd2e:34697 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T23:38:14,328 INFO [RS:0;412a5e44fd2e:34697 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T23:38:14,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:14,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:14,430 INFO [RS:0;412a5e44fd2e:34697 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C34697%2C1732145893799, suffix=, logDir=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799, archiveDir=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/oldWALs, maxLogs=32 2024-11-20T23:38:14,431 INFO [RS:0;412a5e44fd2e:34697 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C34697%2C1732145893799.1732145894431 2024-11-20T23:38:14,438 INFO [RS:0;412a5e44fd2e:34697 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 2024-11-20T23:38:14,441 DEBUG [RS:0;412a5e44fd2e:34697 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39701:39701),(127.0.0.1/127.0.0.1:38727:38727)] 2024-11-20T23:38:14,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:38:14,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:38:14,594 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:38:14,594 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:14,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:14,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:38:14,597 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:38:14,597 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:14,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:14,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:38:14,600 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:38:14,600 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:14,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:14,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:38:14,603 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:38:14,603 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:14,604 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:14,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:38:14,605 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740 2024-11-20T23:38:14,606 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740 2024-11-20T23:38:14,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:38:14,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:38:14,608 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
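The FlushLargeStoresPolicy message above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta descriptor, so it falls back to the region memstore flush size divided by the number of families (16.0 M). For a user table the lower bound can be supplied as a table attribute; a minimal sketch, with the table name and the 16 MB value used only for illustration:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundSketch {
        public static TableDescriptor withLowerBound() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                // Flush only column families whose memstore exceeds 16 MB,
                // instead of flushing every family in the region.
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                          String.valueOf(16L * 1024 * 1024))
                .build();
        }
    }
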
2024-11-20T23:38:14,610 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:38:14,612 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:38:14,613 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=750709, jitterRate=-0.045425042510032654}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:38:14,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732145894590Initializing all the Stores at 1732145894592 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145894592Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145894592Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145894592Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145894592Cleaning up temporary data from old regions at 1732145894608 (+16 ms)Region opened successfully at 1732145894614 (+6 ms) 2024-11-20T23:38:14,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T23:38:14,614 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:38:14,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:38:14,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T23:38:14,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:38:14,614 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T23:38:14,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732145894614Disabling compacts and flushes for region at 1732145894614Disabling writes for close at 1732145894614Writing region 
close event to WAL at 1732145894614Closed at 1732145894614 2024-11-20T23:38:14,616 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:38:14,616 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T23:38:14,616 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T23:38:14,618 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:38:14,619 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T23:38:14,769 DEBUG [412a5e44fd2e:39051 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T23:38:14,770 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=412a5e44fd2e,34697,1732145893799 2024-11-20T23:38:14,771 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,34697,1732145893799, state=OPENING 2024-11-20T23:38:14,787 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T23:38:14,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:14,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:14,799 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:38:14,799 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:38:14,799 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:38:14,799 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,34697,1732145893799}] 2024-11-20T23:38:14,953 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T23:38:14,956 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51253, version=3.0.0-beta-2-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T23:38:14,961 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T23:38:14,961 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:38:14,963 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C34697%2C1732145893799.meta, suffix=.meta, logDir=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799, archiveDir=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/oldWALs, maxLogs=32 2024-11-20T23:38:14,964 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C34697%2C1732145893799.meta.1732145894964.meta 2024-11-20T23:38:14,970 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.meta.1732145894964.meta 2024-11-20T23:38:14,971 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38727:38727),(127.0.0.1/127.0.0.1:39701:39701)] 2024-11-20T23:38:14,971 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:38:14,972 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T23:38:14,972 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T23:38:14,972 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
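The coprocessor load above is driven by the table descriptor attribute shown earlier (coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|'). A hedged sketch of attaching the same endpoint to a user table descriptor; the table and family names are placeholders:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorAttachSketch {
        public static TableDescriptor withMultiRowMutation() throws Exception {
            // Registers the endpoint by class name; HBase loads it when the
            // region opens, as in the RegionCoprocessorHost message above.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .build();
        }
    }
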
2024-11-20T23:38:14,972 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T23:38:14,972 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:38:14,972 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T23:38:14,972 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T23:38:14,973 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:38:14,974 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:38:14,974 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:14,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:14,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:38:14,976 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:38:14,976 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:14,976 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:14,976 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:38:14,977 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:38:14,977 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:14,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:14,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:38:14,979 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:38:14,979 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:14,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
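Each store above is instantiated with DefaultStoreFileTracker because the descriptor carries 'hbase.store.file-tracker.impl' => 'DEFAULT'. The tracker implementation can be selected per table through that same attribute; a minimal sketch, where "FILE" is offered only as the commonly documented alternative and the table name is a placeholder:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class StoreFileTrackerSketch {
        public static TableDescriptor withTracker(String impl) {
            // impl would typically be "DEFAULT" (as in this log) or "FILE".
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setValue("hbase.store.file-tracker.impl", impl)
                .build();
        }
    }
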
2024-11-20T23:38:14,979 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:38:14,980 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740 2024-11-20T23:38:14,981 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740 2024-11-20T23:38:14,983 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:38:14,983 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:38:14,983 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T23:38:14,985 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:38:14,986 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689678, jitterRate=-0.12303023040294647}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:38:14,986 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T23:38:14,987 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732145894972Writing region info on filesystem at 1732145894972Initializing all the Stores at 1732145894973 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145894973Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145894973Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145894973Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145894973Cleaning up temporary data from old regions at 1732145894983 (+10 ms)Running coprocessor post-open hooks at 1732145894986 (+3 ms)Region opened successfully at 1732145894986 2024-11-20T23:38:14,988 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732145894953 2024-11-20T23:38:14,990 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T23:38:14,990 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T23:38:14,991 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=412a5e44fd2e,34697,1732145893799 2024-11-20T23:38:14,992 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,34697,1732145893799, state=OPEN 2024-11-20T23:38:15,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:38:15,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:38:15,067 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=412a5e44fd2e,34697,1732145893799 2024-11-20T23:38:15,067 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:38:15,067 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:38:15,070 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T23:38:15,070 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,34697,1732145893799 in 268 msec 2024-11-20T23:38:15,073 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T23:38:15,073 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 454 msec 2024-11-20T23:38:15,074 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:38:15,074 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T23:38:15,076 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:38:15,076 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,34697,1732145893799, seqNum=-1] 2024-11-20T23:38:15,076 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:38:15,078 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52395, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:38:15,084 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 918 msec 2024-11-20T23:38:15,084 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732145895084, completionTime=-1 2024-11-20T23:38:15,084 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T23:38:15,085 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T23:38:15,087 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T23:38:15,087 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732145955087 2024-11-20T23:38:15,087 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732146015087 2024-11-20T23:38:15,087 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-20T23:38:15,087 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,39051,1732145893622-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:15,087 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,39051,1732145893622-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:15,087 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,39051,1732145893622-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:15,088 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-412a5e44fd2e:39051, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T23:38:15,088 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:15,088 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:15,090 DEBUG [master/412a5e44fd2e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T23:38:15,093 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.226sec 2024-11-20T23:38:15,093 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T23:38:15,093 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T23:38:15,093 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T23:38:15,093 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T23:38:15,093 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T23:38:15,093 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,39051,1732145893622-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:38:15,093 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,39051,1732145893622-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T23:38:15,096 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T23:38:15,096 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T23:38:15,096 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,39051,1732145893622-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
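The MasterQuotaManager and (earlier) RegionServerRpcQuotaManager messages report quota support disabled, which is the default for a test mini-cluster. A hedged sketch of the configuration switch usually used to turn it on; the property name is as commonly documented and should be verified against the running version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaEnableSketch {
        public static Configuration withQuotas() {
            Configuration conf = HBaseConfiguration.create();
            // With this flag set, the master starts its quota manager instead of
            // logging "Quota support disabled" as above.
            conf.setBoolean("hbase.quota.enabled", true);
            return conf;
        }
    }
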
2024-11-20T23:38:15,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d3d6246, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:38:15,126 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 412a5e44fd2e,39051,-1 for getting cluster id 2024-11-20T23:38:15,126 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T23:38:15,128 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dc35a5de-0671-4d66-8128-ae5214d5eca5' 2024-11-20T23:38:15,129 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T23:38:15,129 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dc35a5de-0671-4d66-8128-ae5214d5eca5" 2024-11-20T23:38:15,129 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@707b4c03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:38:15,129 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [412a5e44fd2e,39051,-1] 2024-11-20T23:38:15,130 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T23:38:15,130 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:15,132 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39338, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T23:38:15,133 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@370b05c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:38:15,133 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:38:15,135 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,34697,1732145893799, seqNum=-1] 2024-11-20T23:38:15,135 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:38:15,137 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35724, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:38:15,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=412a5e44fd2e,39051,1732145893622 2024-11-20T23:38:15,139 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:15,143 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T23:38:15,143 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-20T23:38:15,143 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-20T23:38:15,143 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T23:38:15,145 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 412a5e44fd2e,39051,1732145893622 2024-11-20T23:38:15,145 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@354823d8 2024-11-20T23:38:15,145 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T23:38:15,147 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39352, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T23:38:15,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39051 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T23:38:15,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39051 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
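The two TableDescriptorChecker warnings stem from deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) values, which log-rolling tests typically use to force frequent flushes and rolls. A hedged sketch of a descriptor carrying those values, using the standard TableDescriptorBuilder API; the builder calls are real, but wiring them into this particular test this way is an assumption:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class SmallRegionDescriptorSketch {
  public static TableDescriptor build() {
    // Sizes copied from the warnings above; production values are far larger.
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
        .setMaxFileSize(786432L)      // MAX_FILESIZE below the checker's minimum
        .setMemStoreFlushSize(8192L)  // MEMSTORE_FLUSHSIZE below the checker's minimum
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")))
        .build();
  }
}
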
2024-11-20T23:38:15,148 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39051 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T23:38:15,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39051 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-20T23:38:15,151 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T23:38:15,151 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:15,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39051 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-20T23:38:15,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39051 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T23:38:15,152 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T23:38:15,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42623 is added to blk_1073741835_1011 (size=395) 2024-11-20T23:38:15,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741835_1011 (size=395) 2024-11-20T23:38:15,172 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a59d4386f48f26cd3b613aeb715759b7, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d 2024-11-20T23:38:15,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741836_1012 (size=78) 2024-11-20T23:38:15,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42623 is added to blk_1073741836_1012 (size=78) 2024-11-20T23:38:15,182 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:38:15,182 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing a59d4386f48f26cd3b613aeb715759b7, disabling compactions & flushes 2024-11-20T23:38:15,182 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. 2024-11-20T23:38:15,182 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. 2024-11-20T23:38:15,183 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. after waiting 0 ms 2024-11-20T23:38:15,183 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. 2024-11-20T23:38:15,183 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. 2024-11-20T23:38:15,183 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for a59d4386f48f26cd3b613aeb715759b7: Waiting for close lock at 1732145895182Disabling compacts and flushes for region at 1732145895182Disabling writes for close at 1732145895183 (+1 ms)Writing region close event to WAL at 1732145895183Closed at 1732145895183 2024-11-20T23:38:15,184 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T23:38:15,184 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732145895184"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732145895184"}]},"ts":"1732145895184"} 2024-11-20T23:38:15,187 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
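Once CreateTableProcedure (pid=4) has added the region to hbase:meta, a client still has to wait for region assignment and the ENABLED table state before using the table. One way to do that with the public Admin API is sketched below; the polling loop is an illustration, not necessarily how this test waits:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Wait until region assignment finishes and hbase:meta reports ENABLED.
      while (!admin.isTableAvailable(table)) {
        Thread.sleep(100);
      }
      System.out.println(table + " is online");
    }
  }
}
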
2024-11-20T23:38:15,188 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T23:38:15,189 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732145895189"}]},"ts":"1732145895189"} 2024-11-20T23:38:15,191 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-20T23:38:15,192 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a59d4386f48f26cd3b613aeb715759b7, ASSIGN}] 2024-11-20T23:38:15,193 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a59d4386f48f26cd3b613aeb715759b7, ASSIGN 2024-11-20T23:38:15,194 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a59d4386f48f26cd3b613aeb715759b7, ASSIGN; state=OFFLINE, location=412a5e44fd2e,34697,1732145893799; forceNewPlan=false, retain=false 2024-11-20T23:38:15,345 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a59d4386f48f26cd3b613aeb715759b7, regionState=OPENING, regionLocation=412a5e44fd2e,34697,1732145893799 2024-11-20T23:38:15,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a59d4386f48f26cd3b613aeb715759b7, ASSIGN because future has completed 2024-11-20T23:38:15,348 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a59d4386f48f26cd3b613aeb715759b7, server=412a5e44fd2e,34697,1732145893799}] 2024-11-20T23:38:15,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:15,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:15,507 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. 2024-11-20T23:38:15,507 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a59d4386f48f26cd3b613aeb715759b7, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7.', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:38:15,508 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart a59d4386f48f26cd3b613aeb715759b7 2024-11-20T23:38:15,508 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:38:15,508 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a59d4386f48f26cd3b613aeb715759b7 2024-11-20T23:38:15,508 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a59d4386f48f26cd3b613aeb715759b7 2024-11-20T23:38:15,509 INFO [StoreOpener-a59d4386f48f26cd3b613aeb715759b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a59d4386f48f26cd3b613aeb715759b7 2024-11-20T23:38:15,511 INFO [StoreOpener-a59d4386f48f26cd3b613aeb715759b7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a59d4386f48f26cd3b613aeb715759b7 columnFamilyName info 2024-11-20T23:38:15,511 DEBUG [StoreOpener-a59d4386f48f26cd3b613aeb715759b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
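The recurring WARN from RecoverLeaseFSUtils is the Close-WAL-Writer-0 thread retrying isFileClosed (invoked via reflection) against a DFSClient that has already been shut down, so every attempt surfaces "java.io.IOException: Filesystem closed". A small sketch that reproduces the same failure mode with the plain HDFS client API; the URI and path are illustrative stand-ins for the WAL files named in the warnings:

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FilesystemClosedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative endpoint; the log's WAL files live under hdfs://localhost:44951/...
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44951"), conf);
    fs.close(); // the WAL close path ends up holding a client in this state
    try {
      ((DistributedFileSystem) fs).isFileClosed(new Path("/some/old/wal"));
    } catch (IOException e) {
      // DFSClient.checkOpen rejects every call once the client is closed,
      // which is exactly what the Close-WAL-Writer-0 retries keep hitting.
      System.out.println("Expected: " + e.getMessage());
    }
  }
}
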
2024-11-20T23:38:15,512 INFO [StoreOpener-a59d4386f48f26cd3b613aeb715759b7-1 {}] regionserver.HStore(327): Store=a59d4386f48f26cd3b613aeb715759b7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:38:15,512 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a59d4386f48f26cd3b613aeb715759b7 2024-11-20T23:38:15,512 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/default/TestLogRolling-testLogRollOnPipelineRestart/a59d4386f48f26cd3b613aeb715759b7 2024-11-20T23:38:15,513 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/default/TestLogRolling-testLogRollOnPipelineRestart/a59d4386f48f26cd3b613aeb715759b7 2024-11-20T23:38:15,513 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a59d4386f48f26cd3b613aeb715759b7 2024-11-20T23:38:15,513 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a59d4386f48f26cd3b613aeb715759b7 2024-11-20T23:38:15,516 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a59d4386f48f26cd3b613aeb715759b7 2024-11-20T23:38:15,519 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/default/TestLogRolling-testLogRollOnPipelineRestart/a59d4386f48f26cd3b613aeb715759b7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:38:15,519 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a59d4386f48f26cd3b613aeb715759b7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866592, jitterRate=0.10192984342575073}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T23:38:15,520 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a59d4386f48f26cd3b613aeb715759b7 2024-11-20T23:38:15,520 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a59d4386f48f26cd3b613aeb715759b7: Running coprocessor pre-open hook at 1732145895508Writing region info on filesystem at 1732145895508Initializing all the Stores at 1732145895509 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145895509Cleaning up temporary data from 
old regions at 1732145895513 (+4 ms)Running coprocessor post-open hooks at 1732145895520 (+7 ms)Region opened successfully at 1732145895520 2024-11-20T23:38:15,522 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7., pid=6, masterSystemTime=1732145895501 2024-11-20T23:38:15,524 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. 2024-11-20T23:38:15,524 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. 2024-11-20T23:38:15,525 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a59d4386f48f26cd3b613aeb715759b7, regionState=OPEN, openSeqNum=2, regionLocation=412a5e44fd2e,34697,1732145893799 2024-11-20T23:38:15,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a59d4386f48f26cd3b613aeb715759b7, server=412a5e44fd2e,34697,1732145893799 because future has completed 2024-11-20T23:38:15,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T23:38:15,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a59d4386f48f26cd3b613aeb715759b7, server=412a5e44fd2e,34697,1732145893799 in 181 msec 2024-11-20T23:38:15,536 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T23:38:15,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a59d4386f48f26cd3b613aeb715759b7, ASSIGN in 341 msec 2024-11-20T23:38:15,538 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T23:38:15,538 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732145895538"}]},"ts":"1732145895538"} 2024-11-20T23:38:15,541 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-20T23:38:15,542 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T23:38:15,545 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 394 msec 2024-11-20T23:38:16,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed 
invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:16,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:16,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T23:38:16,999 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T23:38:17,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-20T23:38:17,000 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-20T23:38:17,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:38:17,000 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T23:38:17,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:17,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:18,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:18,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:19,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:19,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:19,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:19,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:19,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:19,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:19,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:19,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:19,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:19,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:19,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:19,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:20,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:20,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:20,499 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T23:38:20,522 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:20,522 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:20,522 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:20,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:20,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:20,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:20,529 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:20,530 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:20,530 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:20,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:20,541 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T23:38:20,542 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-20T23:38:21,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:21,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:22,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:22,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:23,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:23,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:24,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:24,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:25,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39051 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T23:38:25,236 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-20T23:38:25,236 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-20T23:38:25,240 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-20T23:38:25,240 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. 2024-11-20T23:38:25,245 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7., hostname=412a5e44fd2e,34697,1732145893799, seqNum=2] 2024-11-20T23:38:25,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:25,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:26,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:26,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:27,248 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 2024-11-20T23:38:27,249 WARN [ResponseProcessor for block BP-1780958559-172.17.0.2-1732145890772:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1780958559-172.17.0.2-1732145890772:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1780958559-172.17.0.2-1732145890772:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:42623,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:27,249 WARN [ResponseProcessor for block BP-1780958559-172.17.0.2-1732145890772:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1780958559-172.17.0.2-1732145890772:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1780958559-172.17.0.2-1732145890772:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:42623,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:27,249 WARN [ResponseProcessor for block BP-1780958559-172.17.0.2-1732145890772:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1780958559-172.17.0.2-1732145890772:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:27,250 WARN [DataStreamer for file /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 block BP-1780958559-172.17.0.2-1732145890772:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1780958559-172.17.0.2-1732145890772:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42623,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK], DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42623,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK]) is bad. 
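The Close-WAL-Writer-0 warnings repeating roughly once per second above all come from the same probe: the stack traces show RecoverLeaseFSUtils looking up isFileClosed reflectively and calling it through Method.invoke, and because the test has already shut down that HDFS client, the real error (java.io.IOException: Filesystem closed from DFSClient.checkOpen) surfaces wrapped as the cause of an InvocationTargetException. A minimal, self-contained sketch of that call shape (not the HBase implementation; the class and method names below are illustrative only) is:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {
      // Reflectively asks the filesystem whether a file is closed, the same shape of
      // call that produces the "Failed invocation ..." warnings in the traces above.
      static boolean probeIsFileClosed(FileSystem fs, Path wal) {
        try {
          Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) isFileClosed.invoke(fs, wal);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // filesystem does not expose isFileClosed; caller keeps polling
        } catch (InvocationTargetException e) {
          // The interesting error is the wrapped cause; in the log above it is
          // "java.io.IOException: Filesystem closed".
          System.err.println("Failed invocation for " + wal + ": " + e.getCause());
          return false;
        }
      }
    }

The repetition in the log is simply the caller retrying this probe on a timer until the lease is recovered or the retry budget runs out.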
2024-11-20T23:38:27,250 WARN [DataStreamer for file /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.meta.1732145894964.meta block BP-1780958559-172.17.0.2-1732145890772:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1780958559-172.17.0.2-1732145890772:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK], DatanodeInfoWithStorage[127.0.0.1:42623,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42623,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK]) is bad. 2024-11-20T23:38:27,250 WARN [PacketResponder: BP-1780958559-172.17.0.2-1732145890772:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42623] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:27,250 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_431632814_22 at /127.0.0.1:55262 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55262 dst: /127.0.0.1:42623 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:27,250 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_431632814_22 at /127.0.0.1:45910 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38013:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45910 dst: /127.0.0.1:38013 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T23:38:27,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_431632814_22 at /127.0.0.1:55288 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55288 dst: /127.0.0.1:42623 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:27,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_431632814_22 at /127.0.0.1:45908 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38013:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45908 dst: /127.0.0.1:38013 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:27,256 WARN [DataStreamer for file /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/WALs/412a5e44fd2e,39051,1732145893622/412a5e44fd2e%2C39051%2C1732145893622.1732145893943 block BP-1780958559-172.17.0.2-1732145890772:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1780958559-172.17.0.2-1732145890772:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK], DatanodeInfoWithStorage[127.0.0.1:42623,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42623,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK]) is bad. 2024-11-20T23:38:27,256 WARN [PacketResponder: BP-1780958559-172.17.0.2-1732145890772:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42623] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T23:38:27,257 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-450276426_22 at /127.0.0.1:45880 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38013:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45880 dst: /127.0.0.1:38013 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:27,257 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-450276426_22 at /127.0.0.1:55232 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55232 dst: /127.0.0.1:42623 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:27,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d10ba6a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:38:27,260 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b91ed3f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:38:27,261 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:38:27,261 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@718cd5f1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:38:27,261 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8825f29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,STOPPED} 2024-11-20T23:38:27,262 WARN [BP-1780958559-172.17.0.2-1732145890772 heartbeating to localhost/127.0.0.1:46843 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:38:27,262 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
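The 23:38:27,249-257 burst above is the pipeline restart itself: every open WAL and MasterData block reports either "Bad response ERROR" or an EOF involving datanode 127.0.0.1:42623, DataStreamer marks that node bad, and the Jetty "Stopped" lines that follow are the test tearing that datanode down. When scanning a burst like this, a small filter that pulls the block id and the bad datanode out of each "Error Recovery ... is bad." line helps; the JDK-only sketch below is written against the exact wording of the lines above (the regex is an assumption about this log's format, not an HDFS API):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public final class BadPipelineNodes {
      // Matches entries such as:
      //   ... Error Recovery for BP-...:blk_1073741833_1009 in pipeline [...]:
      //   datanode 0(DatanodeInfoWithStorage[127.0.0.1:42623,...]) is bad.
      private static final Pattern BAD_NODE = Pattern.compile(
          "Error Recovery for (\\S+) in pipeline .*?datanode (\\d+)\\(DatanodeInfoWithStorage\\[([^,\\]]+)");

      public static void main(String[] args) throws Exception {
        for (String line : Files.readAllLines(Path.of(args[0]))) {
          Matcher m = BAD_NODE.matcher(line);
          while (m.find()) {
            System.out.printf("block=%s badIndex=%s badNode=%s%n",
                m.group(1), m.group(2), m.group(3));
          }
        }
      }
    }

Run as a single-file source program (java BadPipelineNodes.java <logfile>) to list each block together with the pipeline slot and datanode that the client gave up on.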
2024-11-20T23:38:27,262 WARN [BP-1780958559-172.17.0.2-1732145890772 heartbeating to localhost/127.0.0.1:46843 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1780958559-172.17.0.2-1732145890772 (Datanode Uuid a90ad449-2e14-4fae-ac44-0475925d20ac) service to localhost/127.0.0.1:46843 2024-11-20T23:38:27,263 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:38:27,263 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data3/current/BP-1780958559-172.17.0.2-1732145890772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:38:27,263 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data4/current/BP-1780958559-172.17.0.2-1732145890772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:38:27,264 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:38:27,271 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:38:27,274 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:38:27,275 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:38:27,275 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:38:27,275 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:38:27,276 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@718ea2f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:38:27,276 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@167fd01b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:38:27,380 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ca8564b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/java.io.tmpdir/jetty-localhost-35521-hadoop-hdfs-3_4_1-tests_jar-_-any-10386666315237879811/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:38:27,380 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@fa662a{HTTP/1.1, 
(http/1.1)}{localhost:35521} 2024-11-20T23:38:27,380 INFO [Time-limited test {}] server.Server(415): Started @177580ms 2024-11-20T23:38:27,381 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:38:27,408 WARN [ResponseProcessor for block BP-1780958559-172.17.0.2-1732145890772:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1780958559-172.17.0.2-1732145890772:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:27,408 WARN [ResponseProcessor for block BP-1780958559-172.17.0.2-1732145890772:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1780958559-172.17.0.2-1732145890772:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:27,408 WARN [ResponseProcessor for block BP-1780958559-172.17.0.2-1732145890772:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1780958559-172.17.0.2-1732145890772:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:27,408 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_431632814_22 at /127.0.0.1:52214 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38013:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52214 dst: /127.0.0.1:38013 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:27,408 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_431632814_22 at /127.0.0.1:52216 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38013:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52216 dst: /127.0.0.1:38013 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:27,408 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-450276426_22 at /127.0.0.1:52230 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38013:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52230 dst: /127.0.0.1:38013 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
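Between the two error bursts the test stops one datanode's web context and immediately starts a replacement on localhost:35521, i.e. it is deliberately bouncing the write pipeline; the second burst of ClosedChannelException/EOF traces and the "Ending block pool service" lines below are the other datanode going through the same cycle. In a MiniDFSCluster-based test this is typically driven by something like the sketch below; the helper names (stopDataNode, restartDataNode, waitActive) are assumptions based on common Hadoop test usage, not read from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public final class PipelineRestartSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        try {
          cluster.waitActive();
          // Stop the first datanode (keeping its storage); clients with open streams
          // then run pipeline error recovery like the WARN/ERROR entries above ...
          MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
          // ... and bring it back so writers can re-establish the pipeline.
          cluster.restartDataNode(dn, true);
          cluster.waitActive();
        } finally {
          cluster.shutdown();
        }
      }
    }

The point of the exercise, for a test like TestLogRolling#testLogRollOnPipelineRestart, is that the WAL writer should survive exactly this kind of datanode bounce by rolling onto a fresh pipeline.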
2024-11-20T23:38:27,413 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@16178224{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T23:38:27,413 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34c1099f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T23:38:27,414 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T23:38:27,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@719d6bc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T23:38:27,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30a928dc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,STOPPED}
2024-11-20T23:38:27,415 WARN [BP-1780958559-172.17.0.2-1732145890772 heartbeating to localhost/127.0.0.1:46843 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T23:38:27,415 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T23:38:27,415 WARN [BP-1780958559-172.17.0.2-1732145890772 heartbeating to localhost/127.0.0.1:46843 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1780958559-172.17.0.2-1732145890772 (Datanode Uuid 09b8d872-1fe7-45a4-9f26-fbd4a3e235af) service to localhost/127.0.0.1:46843
2024-11-20T23:38:27,415 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T23:38:27,415 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data1/current/BP-1780958559-172.17.0.2-1732145890772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T23:38:27,415 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data2/current/BP-1780958559-172.17.0.2-1732145890772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T23:38:27,415 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T23:38:27,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:27,423 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:38:27,427 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:38:27,427 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:38:27,427 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:38:27,427 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:38:27,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c54cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:38:27,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2026736d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:38:27,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:27,533 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@528eeea6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/java.io.tmpdir/jetty-localhost-35785-hadoop-hdfs-3_4_1-tests_jar-_-any-13180332129681724501/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:38:27,534 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20466b6{HTTP/1.1, (http/1.1)}{localhost:35785} 2024-11-20T23:38:27,534 INFO [Time-limited test {}] server.Server(415): Started @177734ms 2024-11-20T23:38:27,535 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:38:27,881 WARN [Thread-1355 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T23:38:27,900 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd9b52dba833bf0a with lease ID 0x368253c9e5e88a33: from storage DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5 node DatanodeRegistration(127.0.0.1:42563, datanodeUuid=a90ad449-2e14-4fae-ac44-0475925d20ac, infoPort=36555, infoSecurePort=0, ipcPort=41779, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:38:27,900 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd9b52dba833bf0a with lease ID 0x368253c9e5e88a33: from storage DS-848fe04f-f36b-463c-9a5f-91cba0dd9880 node DatanodeRegistration(127.0.0.1:42563, datanodeUuid=a90ad449-2e14-4fae-ac44-0475925d20ac, infoPort=36555, infoSecurePort=0, ipcPort=41779, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T23:38:28,211 WARN [Thread-1375 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T23:38:28,214 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x63368a6e551f9c67 with lease ID 0x368253c9e5e88a34: from storage DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec node DatanodeRegistration(127.0.0.1:42403, datanodeUuid=09b8d872-1fe7-45a4-9f26-fbd4a3e235af, infoPort=44003, infoSecurePort=0, ipcPort=41703, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:38:28,214 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x63368a6e551f9c67 with lease ID 0x368253c9e5e88a34: from storage DS-3cc52cd6-a12d-4bb7-be9f-f014465f45f5 node DatanodeRegistration(127.0.0.1:42403, datanodeUuid=09b8d872-1fe7-45a4-9f26-fbd4a3e235af, infoPort=44003, infoSecurePort=0, ipcPort=41703, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T23:38:28,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:28,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:28,560 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-20T23:38:28,563 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-20T23:38:28,565 ERROR [FSHLog-0-hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d-prefix:412a5e44fd2e,34697,1732145893799 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:28,565 WARN [FSHLog-0-hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d-prefix:412a5e44fd2e,34697,1732145893799 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T23:38:28,565 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C34697%2C1732145893799:(num 1732145894431) roll requested
2024-11-20T23:38:28,566 INFO [regionserver/412a5e44fd2e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C34697%2C1732145893799.1732145908565
2024-11-20T23:38:28,571 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 newFile=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565
2024-11-20T23:38:28,572 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:38:28,572 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:38:28,572 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:38:28,572 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:38:28,572 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:38:28,572 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565
2024-11-20T23:38:28,572 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T23:38:28,573 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:28,573 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 2024-11-20T23:38:28,573 WARN [IPC Server handler 0 on default port 46843 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-11-20T23:38:28,573 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 after 0ms 2024-11-20T23:38:28,577 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36555:36555),(127.0.0.1/127.0.0.1:44003:44003)] 2024-11-20T23:38:28,577 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 is not closed yet, will try archiving it next time 2024-11-20T23:38:29,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:29,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:29,899 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-20T23:38:30,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:30,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:30,580 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-20T23:38:31,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:31,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:32,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:32,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:32,574 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 after 4001ms 2024-11-20T23:38:32,582 WARN [ResponseProcessor for block BP-1780958559-172.17.0.2-1732145890772:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1780958559-172.17.0.2-1732145890772:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1780958559-172.17.0.2-1732145890772:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:42403,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:32,582 WARN [DataStreamer for file /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 block BP-1780958559-172.17.0.2-1732145890772:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1780958559-172.17.0.2-1732145890772:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42563,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK], DatanodeInfoWithStorage[127.0.0.1:42403,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42403,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]) is bad. 
2024-11-20T23:38:32,582 WARN [PacketResponder: BP-1780958559-172.17.0.2-1732145890772:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42403] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:32,583 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_431632814_22 at /127.0.0.1:40648 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42563:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40648 dst: /127.0.0.1:42563 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T23:38:32,583 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_431632814_22 at /127.0.0.1:37358 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42403:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37358 dst: /127.0.0.1:42403 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T23:38:32,585 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@528eeea6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T23:38:32,585 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20466b6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T23:38:32,586 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T23:38:32,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2026736d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T23:38:32,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c54cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,STOPPED}
2024-11-20T23:38:32,588 WARN [BP-1780958559-172.17.0.2-1732145890772 heartbeating to localhost/127.0.0.1:46843 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T23:38:32,588 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T23:38:32,588 WARN [BP-1780958559-172.17.0.2-1732145890772 heartbeating to localhost/127.0.0.1:46843 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1780958559-172.17.0.2-1732145890772 (Datanode Uuid 09b8d872-1fe7-45a4-9f26-fbd4a3e235af) service to localhost/127.0.0.1:46843
2024-11-20T23:38:32,588 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T23:38:32,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data1/current/BP-1780958559-172.17.0.2-1732145890772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T23:38:32,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data2/current/BP-1780958559-172.17.0.2-1732145890772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T23:38:32,590 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T23:38:32,599 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T23:38:32,602 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T23:38:32,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T23:38:32,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T23:38:32,603 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-20T23:38:32,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24dbb8ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,AVAILABLE}
2024-11-20T23:38:32,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7269a538{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T23:38:32,709 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6f2f2023{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/java.io.tmpdir/jetty-localhost-39275-hadoop-hdfs-3_4_1-tests_jar-_-any-8742733204398509392/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T23:38:32,709 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ae70be3{HTTP/1.1, (http/1.1)}{localhost:39275}
2024-11-20T23:38:32,709 INFO [Time-limited test {}] server.Server(415): Started @182909ms
2024-11-20T23:38:32,711 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T23:38:32,732 WARN [ResponseProcessor for block BP-1780958559-172.17.0.2-1732145890772:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1780958559-172.17.0.2-1732145890772:blk_1073741837_1018
java.io.EOFException: Unexpected EOF while trying to read response from server
at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T23:38:32,733 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_431632814_22 at /127.0.0.1:46192 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42563:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46192 dst: /127.0.0.1:42563
java.nio.channels.ClosedChannelException: null
at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T23:38:32,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ca8564b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:38:32,770 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fa662a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:38:32,770 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:38:32,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@167fd01b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:38:32,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@718ea2f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,STOPPED} 2024-11-20T23:38:32,771 WARN [BP-1780958559-172.17.0.2-1732145890772 heartbeating to localhost/127.0.0.1:46843 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:38:32,771 WARN [BP-1780958559-172.17.0.2-1732145890772 heartbeating to localhost/127.0.0.1:46843 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1780958559-172.17.0.2-1732145890772 (Datanode Uuid a90ad449-2e14-4fae-ac44-0475925d20ac) service to localhost/127.0.0.1:46843 2024-11-20T23:38:32,771 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T23:38:32,771 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:38:32,772 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data3/current/BP-1780958559-172.17.0.2-1732145890772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:38:32,772 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data4/current/BP-1780958559-172.17.0.2-1732145890772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:38:32,772 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:38:32,779 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:38:32,783 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:38:32,783 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:38:32,783 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:38:32,783 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:38:32,784 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e60361d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:38:32,784 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ddd02f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:38:32,888 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@606a795b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/java.io.tmpdir/jetty-localhost-42877-hadoop-hdfs-3_4_1-tests_jar-_-any-14972334607381555512/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:38:32,889 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78f99ac1{HTTP/1.1, (http/1.1)}{localhost:42877} 2024-11-20T23:38:32,889 INFO [Time-limited test {}] server.Server(415): Started @183089ms 2024-11-20T23:38:32,890 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:38:33,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:33,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:33,538 WARN [Thread-1429 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T23:38:33,540 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1922780b1a57498 with lease ID 0x368253c9e5e88a35: from storage DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec node DatanodeRegistration(127.0.0.1:36821, datanodeUuid=09b8d872-1fe7-45a4-9f26-fbd4a3e235af, infoPort=33949, infoSecurePort=0, ipcPort=42357, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T23:38:33,540 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1922780b1a57498 with lease ID 0x368253c9e5e88a35: from storage DS-3cc52cd6-a12d-4bb7-be9f-f014465f45f5 node DatanodeRegistration(127.0.0.1:36821, datanodeUuid=09b8d872-1fe7-45a4-9f26-fbd4a3e235af, infoPort=33949, infoSecurePort=0, ipcPort=42357, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:38:33,611 WARN [Thread-1449 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T23:38:33,614 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa8c23a4487832107 with lease ID 0x368253c9e5e88a36: from storage DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5 node DatanodeRegistration(127.0.0.1:46697, datanodeUuid=a90ad449-2e14-4fae-ac44-0475925d20ac, infoPort=34593, infoSecurePort=0, ipcPort=46283, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:38:33,614 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa8c23a4487832107 with lease ID 0x368253c9e5e88a36: from storage DS-848fe04f-f36b-463c-9a5f-91cba0dd9880 node DatanodeRegistration(127.0.0.1:46697, datanodeUuid=a90ad449-2e14-4fae-ac44-0475925d20ac, infoPort=34593, infoSecurePort=0, ipcPort=46283, storageInfo=lv=-57;cid=testClusterID;nsid=25268180;c=1732145890772), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:38:33,909 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-20T23:38:33,911 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-20T23:38:33,913 ERROR [FSHLog-0-hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d-prefix:412a5e44fd2e,34697,1732145893799 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42563,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:33,913 WARN [FSHLog-0-hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d-prefix:412a5e44fd2e,34697,1732145893799 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42563,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:33,913 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C34697%2C1732145893799:(num 1732145908565) roll requested 2024-11-20T23:38:33,913 INFO [regionserver/412a5e44fd2e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C34697%2C1732145893799.1732145913913 2024-11-20T23:38:33,922 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 newFile=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145913913 2024-11-20T23:38:33,922 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:33,922 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:33,922 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:33,922 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:33,922 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:33,923 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145913913 2024-11-20T23:38:33,923 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42563,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:33,923 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42563,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:33,923 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 2024-11-20T23:38:33,923 WARN [IPC Server handler 1 on default port 46843 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-20T23:38:33,924 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 after 0ms 2024-11-20T23:38:33,924 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33949:33949),(127.0.0.1/127.0.0.1:34593:34593)] 2024-11-20T23:38:33,924 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 is not closed yet, will try archiving it next time 2024-11-20T23:38:34,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:34,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:35,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:35,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:35,539 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-20T23:38:35,925 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C34697%2C1732145893799.1732145915925 2024-11-20T23:38:35,933 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145913913 newFile=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 2024-11-20T23:38:35,933 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:35,933 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:35,933 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:35,933 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:35,933 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:35,934 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145913913 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 2024-11-20T23:38:35,935 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33949:33949),(127.0.0.1/127.0.0.1:34593:34593)] 2024-11-20T23:38:35,935 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 is not closed yet, will try archiving it next time 2024-11-20T23:38:35,935 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145913913 is not closed yet, will try archiving it next time 2024-11-20T23:38:35,935 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 2024-11-20T23:38:35,935 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 2024-11-20T23:38:35,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741838_1019 (size=1264) 2024-11-20T23:38:35,936 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 after 1ms 2024-11-20T23:38:35,936 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 2024-11-20T23:38:35,936 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741838_1019 (size=1264) 2024-11-20T23:38:35,937 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 is not closed yet, will try archiving it next time 2024-11-20T23:38:35,947 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732145895521/Put/vlen=218/seqid=0] 2024-11-20T23:38:35,947 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732145905246/Put/vlen=1045/seqid=0] 2024-11-20T23:38:35,948 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145894431 2024-11-20T23:38:35,948 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 2024-11-20T23:38:35,948 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 2024-11-20T23:38:35,948 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 after 0ms 2024-11-20T23:38:35,948 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 2024-11-20T23:38:35,952 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732145908565/Put/vlen=1045/seqid=0] 2024-11-20T23:38:35,952 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732145910580/Put/vlen=1045/seqid=0] 2024-11-20T23:38:35,952 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 2024-11-20T23:38:35,952 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145913913 2024-11-20T23:38:35,952 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145913913 2024-11-20T23:38:35,953 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145913913 after 0ms 2024-11-20T23:38:35,953 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145913913 2024-11-20T23:38:35,957 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732145913912/Put/vlen=1045/seqid=0] 2024-11-20T23:38:35,957 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 2024-11-20T23:38:35,957 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 2024-11-20T23:38:35,957 WARN [IPC Server handler 4 on default port 46843 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-20T23:38:35,958 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 after 1ms 2024-11-20T23:38:36,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:36,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:36,619 WARN [ResponseProcessor for block BP-1780958559-172.17.0.2-1732145890772:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1780958559-172.17.0.2-1732145890772:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:36,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-450276426_22 at /127.0.0.1:41104 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36821:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41104 dst: /127.0.0.1:36821 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:36821 remote=/127.0.0.1:41104]. Total timeout mills is 60000, 59313 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:36,620 WARN [DataStreamer for file /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 block BP-1780958559-172.17.0.2-1732145890772:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1780958559-172.17.0.2-1732145890772:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36821,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK], DatanodeInfoWithStorage[127.0.0.1:46697,DS-cdce7aeb-f6df-402e-afa4-7b5d2a7d97f5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36821,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]) is bad. 
2024-11-20T23:38:36,620 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-450276426_22 at /127.0.0.1:59976 [Receiving block BP-1780958559-172.17.0.2-1732145890772:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46697:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59976 dst: /127.0.0.1:46697 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:38:36,626 WARN [DataStreamer for file /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 block BP-1780958559-172.17.0.2-1732145890772:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1780958559-172.17.0.2-1732145890772:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:36,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741839_1022 (size=85) 2024-11-20T23:38:37,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:37,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:37,925 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145908565 after 4002ms 2024-11-20T23:38:38,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:38,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:39,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:39,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:39,958 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 after 4001ms 2024-11-20T23:38:39,959 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 2024-11-20T23:38:39,963 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 2024-11-20T23:38:39,964 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing a59d4386f48f26cd3b613aeb715759b7 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-20T23:38:39,964 ERROR [FSHLog-0-hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d-prefix:412a5e44fd2e,34697,1732145893799 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1780958559-172.17.0.2-1732145890772:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:39,965 WARN [FSHLog-0-hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d-prefix:412a5e44fd2e,34697,1732145893799 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1780958559-172.17.0.2-1732145890772:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:39,965 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C34697%2C1732145893799:(num 1732145915925) roll requested 2024-11-20T23:38:39,966 INFO [regionserver/412a5e44fd2e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C34697%2C1732145893799.1732145919965 2024-11-20T23:38:39,971 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 newFile=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145919965 2024-11-20T23:38:39,971 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:39,972 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:39,972 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:39,972 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:39,972 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:39,972 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145919965 2024-11-20T23:38:39,972 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1780958559-172.17.0.2-1732145890772:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:39,973 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33949:33949),(127.0.0.1/127.0.0.1:34593:34593)] 2024-11-20T23:38:39,973 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1780958559-172.17.0.2-1732145890772:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:39,973 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 is not closed yet, will try archiving it next time 2024-11-20T23:38:39,973 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 2024-11-20T23:38:39,974 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 after 1ms 2024-11-20T23:38:39,974 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 to hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/oldWALs/412a5e44fd2e%2C34697%2C1732145893799.1732145915925 2024-11-20T23:38:39,991 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/default/TestLogRolling-testLogRollOnPipelineRestart/a59d4386f48f26cd3b613aeb715759b7/.tmp/info/182bea6b5366427eaff9895de5ff429f is 1080, key is row1002/info:/1732145905246/Put/seqid=0 2024-11-20T23:38:39,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741841_1024 (size=9270) 2024-11-20T23:38:39,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741841_1024 (size=9270) 2024-11-20T23:38:39,996 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), 
to=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/default/TestLogRolling-testLogRollOnPipelineRestart/a59d4386f48f26cd3b613aeb715759b7/.tmp/info/182bea6b5366427eaff9895de5ff429f 2024-11-20T23:38:40,004 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/default/TestLogRolling-testLogRollOnPipelineRestart/a59d4386f48f26cd3b613aeb715759b7/.tmp/info/182bea6b5366427eaff9895de5ff429f as hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/default/TestLogRolling-testLogRollOnPipelineRestart/a59d4386f48f26cd3b613aeb715759b7/info/182bea6b5366427eaff9895de5ff429f 2024-11-20T23:38:40,010 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/default/TestLogRolling-testLogRollOnPipelineRestart/a59d4386f48f26cd3b613aeb715759b7/info/182bea6b5366427eaff9895de5ff429f, entries=4, sequenceid=8, filesize=9.1 K 2024-11-20T23:38:40,012 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for a59d4386f48f26cd3b613aeb715759b7 in 48ms, sequenceid=8, compaction requested=false 2024-11-20T23:38:40,012 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for a59d4386f48f26cd3b613aeb715759b7: 2024-11-20T23:38:40,012 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-20T23:38:40,012 ERROR [FSHLog-0-hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d-prefix:412a5e44fd2e,34697,1732145893799.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:40,012 WARN [FSHLog-0-hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d-prefix:412a5e44fd2e,34697,1732145893799.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:40,012 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C34697%2C1732145893799.meta:.meta(num 1732145894964) roll requested 2024-11-20T23:38:40,013 INFO [regionserver/412a5e44fd2e:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C34697%2C1732145893799.meta.1732145920013.meta 2024-11-20T23:38:40,018 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:40,018 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:40,018 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:40,018 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:40,018 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:40,018 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.meta.1732145894964.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.meta.1732145920013.meta 2024-11-20T23:38:40,019 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:40,019 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:40,019 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.meta.1732145894964.meta 2024-11-20T23:38:40,019 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34593:34593),(127.0.0.1/127.0.0.1:33949:33949)] 2024-11-20T23:38:40,019 DEBUG [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.meta.1732145894964.meta is not closed yet, will try archiving it next time 2024-11-20T23:38:40,019 WARN [IPC Server handler 1 on default port 46843 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.meta.1732145894964.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1014 2024-11-20T23:38:40,020 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.meta.1732145894964.meta after 1ms 2024-11-20T23:38:40,034 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/.tmp/info/b296f3b920ac4a969aef11f7a8681034 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7./info:regioninfo/1732145895525/Put/seqid=0 2024-11-20T23:38:40,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741843_1027 (size=7125) 2024-11-20T23:38:40,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741843_1027 (size=7125) 2024-11-20T23:38:40,050 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/.tmp/info/b296f3b920ac4a969aef11f7a8681034 2024-11-20T23:38:40,070 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/.tmp/ns/cbb037bbc614453ca392e7bacc0f23fc is 43, key is default/ns:d/1732145895078/Put/seqid=0 2024-11-20T23:38:40,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741844_1028 (size=5153) 2024-11-20T23:38:40,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741844_1028 (size=5153) 2024-11-20T23:38:40,075 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/.tmp/ns/cbb037bbc614453ca392e7bacc0f23fc 2024-11-20T23:38:40,096 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/.tmp/table/dcb96fa90d5244e1893797d77b85b8ee is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732145895538/Put/seqid=0 2024-11-20T23:38:40,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741845_1029 (size=5438) 2024-11-20T23:38:40,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741845_1029 (size=5438) 2024-11-20T23:38:40,101 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/.tmp/table/dcb96fa90d5244e1893797d77b85b8ee 2024-11-20T23:38:40,107 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/.tmp/info/b296f3b920ac4a969aef11f7a8681034 as hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/info/b296f3b920ac4a969aef11f7a8681034 2024-11-20T23:38:40,114 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/info/b296f3b920ac4a969aef11f7a8681034, entries=10, sequenceid=11, filesize=7.0 K 2024-11-20T23:38:40,115 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/.tmp/ns/cbb037bbc614453ca392e7bacc0f23fc as hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/ns/cbb037bbc614453ca392e7bacc0f23fc 2024-11-20T23:38:40,121 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/ns/cbb037bbc614453ca392e7bacc0f23fc, entries=2, sequenceid=11, filesize=5.0 K 2024-11-20T23:38:40,122 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/.tmp/table/dcb96fa90d5244e1893797d77b85b8ee as hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/table/dcb96fa90d5244e1893797d77b85b8ee 2024-11-20T23:38:40,129 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/table/dcb96fa90d5244e1893797d77b85b8ee, entries=2, sequenceid=11, filesize=5.3 K 2024-11-20T23:38:40,130 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=11, compaction requested=false 2024-11-20T23:38:40,130 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-20T23:38:40,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T23:38:40,136 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T23:38:40,136 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:38:40,136 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:40,136 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:40,136 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T23:38:40,136 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T23:38:40,136 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=958659110, stopped=false 2024-11-20T23:38:40,136 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=412a5e44fd2e,39051,1732145893622 2024-11-20T23:38:40,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:38:40,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:38:40,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:40,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:40,155 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:38:40,155 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:38:40,155 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:38:40,155 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T23:38:40,156 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:38:40,156 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:40,156 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '412a5e44fd2e,34697,1732145893799' ***** 2024-11-20T23:38:40,156 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T23:38:40,156 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T23:38:40,156 INFO [RS:0;412a5e44fd2e:34697 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T23:38:40,156 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T23:38:40,157 INFO [RS:0;412a5e44fd2e:34697 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T23:38:40,157 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(3091): Received CLOSE for a59d4386f48f26cd3b613aeb715759b7 2024-11-20T23:38:40,157 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(959): stopping server 412a5e44fd2e,34697,1732145893799 2024-11-20T23:38:40,157 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:38:40,157 INFO [RS:0;412a5e44fd2e:34697 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;412a5e44fd2e:34697. 2024-11-20T23:38:40,157 DEBUG [RS:0;412a5e44fd2e:34697 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:38:40,157 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a59d4386f48f26cd3b613aeb715759b7, disabling compactions & flushes 2024-11-20T23:38:40,157 DEBUG [RS:0;412a5e44fd2e:34697 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:40,157 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. 2024-11-20T23:38:40,157 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. 
2024-11-20T23:38:40,157 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T23:38:40,157 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. after waiting 0 ms 2024-11-20T23:38:40,157 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T23:38:40,157 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. 2024-11-20T23:38:40,157 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T23:38:40,158 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T23:38:40,158 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-20T23:38:40,158 DEBUG [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(1325): Online Regions={a59d4386f48f26cd3b613aeb715759b7=TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7., 1588230740=hbase:meta,,1.1588230740} 2024-11-20T23:38:40,158 DEBUG [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a59d4386f48f26cd3b613aeb715759b7 2024-11-20T23:38:40,158 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T23:38:40,158 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:38:40,158 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:38:40,158 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T23:38:40,158 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:38:40,164 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/default/TestLogRolling-testLogRollOnPipelineRestart/a59d4386f48f26cd3b613aeb715759b7/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-20T23:38:40,164 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-20T23:38:40,164 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. 
2024-11-20T23:38:40,164 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:38:40,164 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a59d4386f48f26cd3b613aeb715759b7: Waiting for close lock at 1732145920157Running coprocessor pre-close hooks at 1732145920157Disabling compacts and flushes for region at 1732145920157Disabling writes for close at 1732145920157Writing region close event to WAL at 1732145920159 (+2 ms)Running coprocessor post-close hooks at 1732145920164 (+5 ms)Closed at 1732145920164 2024-11-20T23:38:40,164 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T23:38:40,165 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732145895148.a59d4386f48f26cd3b613aeb715759b7. 2024-11-20T23:38:40,165 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732145920158Running coprocessor pre-close hooks at 1732145920158Disabling compacts and flushes for region at 1732145920158Disabling writes for close at 1732145920158Writing region close event to WAL at 1732145920160 (+2 ms)Running coprocessor post-close hooks at 1732145920164 (+4 ms)Closed at 1732145920164 2024-11-20T23:38:40,165 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T23:38:40,293 INFO [regionserver/412a5e44fd2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T23:38:40,293 INFO [regionserver/412a5e44fd2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T23:38:40,323 INFO [regionserver/412a5e44fd2e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:38:40,358 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(976): stopping server 412a5e44fd2e,34697,1732145893799; all regions closed. 2024-11-20T23:38:40,359 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:40,359 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:40,359 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:40,359 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:40,359 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:40,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741842_1025 (size=825) 2024-11-20T23:38:40,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741842_1025 (size=825) 2024-11-20T23:38:40,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:40,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:41,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:41,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:41,615 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-20T23:38:42,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:42,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:43,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:43,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:43,589 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-20T23:38:44,020 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.meta.1732145894964.meta after 4001ms 2024-11-20T23:38:44,021 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/WALs/412a5e44fd2e,34697,1732145893799/412a5e44fd2e%2C34697%2C1732145893799.meta.1732145894964.meta to hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/oldWALs/412a5e44fd2e%2C34697%2C1732145893799.meta.1732145894964.meta 2024-11-20T23:38:44,024 DEBUG [RS:0;412a5e44fd2e:34697 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/oldWALs 2024-11-20T23:38:44,024 INFO [RS:0;412a5e44fd2e:34697 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C34697%2C1732145893799.meta:.meta(num 1732145920013) 2024-11-20T23:38:44,024 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,024 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,025 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,025 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,025 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741840_1023 (size=1162) 2024-11-20T23:38:44,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741840_1023 (size=1162) 2024-11-20T23:38:44,033 DEBUG [RS:0;412a5e44fd2e:34697 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/oldWALs 2024-11-20T23:38:44,033 INFO [RS:0;412a5e44fd2e:34697 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C34697%2C1732145893799:(num 1732145919965) 2024-11-20T23:38:44,033 DEBUG [RS:0;412a5e44fd2e:34697 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:44,033 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:38:44,033 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:38:44,033 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.ChoreService(370): Chore service for: regionserver/412a5e44fd2e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T23:38:44,033 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:38:44,033 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T23:38:44,034 INFO [RS:0;412a5e44fd2e:34697 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34697 2024-11-20T23:38:44,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/412a5e44fd2e,34697,1732145893799 2024-11-20T23:38:44,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:38:44,102 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:38:44,102 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [412a5e44fd2e,34697,1732145893799] 2024-11-20T23:38:44,122 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/412a5e44fd2e,34697,1732145893799 already deleted, retry=false 2024-11-20T23:38:44,123 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 412a5e44fd2e,34697,1732145893799 expired; onlineServers=0 2024-11-20T23:38:44,123 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '412a5e44fd2e,39051,1732145893622' ***** 2024-11-20T23:38:44,123 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T23:38:44,123 INFO [M:0;412a5e44fd2e:39051 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:38:44,123 INFO [M:0;412a5e44fd2e:39051 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:38:44,123 DEBUG [M:0;412a5e44fd2e:39051 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T23:38:44,123 DEBUG [M:0;412a5e44fd2e:39051 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T23:38:44,123 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145894172 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145894172,5,FailOnTimeoutGroup] 2024-11-20T23:38:44,123 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145894172 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145894172,5,FailOnTimeoutGroup] 2024-11-20T23:38:44,123 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T23:38:44,123 INFO [M:0;412a5e44fd2e:39051 {}] hbase.ChoreService(370): Chore service for: master/412a5e44fd2e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T23:38:44,123 INFO [M:0;412a5e44fd2e:39051 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:38:44,123 DEBUG [M:0;412a5e44fd2e:39051 {}] master.HMaster(1795): Stopping service threads 2024-11-20T23:38:44,124 INFO [M:0;412a5e44fd2e:39051 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T23:38:44,124 INFO [M:0;412a5e44fd2e:39051 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:38:44,124 INFO [M:0;412a5e44fd2e:39051 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T23:38:44,124 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T23:38:44,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T23:38:44,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:44,133 DEBUG [M:0;412a5e44fd2e:39051 {}] zookeeper.ZKUtil(347): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T23:38:44,133 WARN [M:0;412a5e44fd2e:39051 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T23:38:44,134 INFO [M:0;412a5e44fd2e:39051 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/.lastflushedseqids 2024-11-20T23:38:44,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741846_1030 (size=111) 2024-11-20T23:38:44,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741846_1030 (size=111) 2024-11-20T23:38:44,141 INFO [M:0;412a5e44fd2e:39051 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T23:38:44,141 INFO [M:0;412a5e44fd2e:39051 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T23:38:44,141 DEBUG [M:0;412a5e44fd2e:39051 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:38:44,141 INFO [M:0;412a5e44fd2e:39051 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:38:44,141 DEBUG [M:0;412a5e44fd2e:39051 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:38:44,141 DEBUG [M:0;412a5e44fd2e:39051 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-20T23:38:44,141 DEBUG [M:0;412a5e44fd2e:39051 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:38:44,141 INFO [M:0;412a5e44fd2e:39051 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-20T23:38:44,142 ERROR [FSHLog-0-hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData-prefix:412a5e44fd2e,39051,1732145893622 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:44,142 WARN [FSHLog-0-hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData-prefix:412a5e44fd2e,39051,1732145893622 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:44,142 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 412a5e44fd2e%2C39051%2C1732145893622:(num 1732145893943) roll requested 2024-11-20T23:38:44,142 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C39051%2C1732145893622.1732145924142 2024-11-20T23:38:44,147 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,147 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,147 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,148 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,148 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,148 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/WALs/412a5e44fd2e,39051,1732145893622/412a5e44fd2e%2C39051%2C1732145893622.1732145893943 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/WALs/412a5e44fd2e,39051,1732145893622/412a5e44fd2e%2C39051%2C1732145893622.1732145924142 2024-11-20T23:38:44,148 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T23:38:44,148 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38013,DS-c3f003f0-d260-420a-b5ff-d8e9bb703cec,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T23:38:44,148 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/WALs/412a5e44fd2e,39051,1732145893622/412a5e44fd2e%2C39051%2C1732145893622.1732145893943 2024-11-20T23:38:44,149 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33949:33949),(127.0.0.1/127.0.0.1:34593:34593)] 2024-11-20T23:38:44,149 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/WALs/412a5e44fd2e,39051,1732145893622/412a5e44fd2e%2C39051%2C1732145893622.1732145893943 is not closed yet, will try archiving it next time 2024-11-20T23:38:44,149 WARN [IPC Server handler 0 on default port 46843 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/WALs/412a5e44fd2e,39051,1732145893622/412a5e44fd2e%2C39051%2C1732145893622.1732145893943 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-20T23:38:44,149 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/WALs/412a5e44fd2e,39051,1732145893622/412a5e44fd2e%2C39051%2C1732145893622.1732145893943 after 1ms 2024-11-20T23:38:44,166 DEBUG [M:0;412a5e44fd2e:39051 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/badc55396184458683435057cfbb6322 is 82, key is hbase:meta,,1/info:regioninfo/1732145894991/Put/seqid=0 2024-11-20T23:38:44,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741848_1033 (size=5672) 2024-11-20T23:38:44,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741848_1033 (size=5672) 2024-11-20T23:38:44,171 INFO [M:0;412a5e44fd2e:39051 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/badc55396184458683435057cfbb6322 2024-11-20T23:38:44,200 DEBUG [M:0;412a5e44fd2e:39051 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4c1adfdd0c284747bad11bb7abc94c61 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732145895544/Put/seqid=0 2024-11-20T23:38:44,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741849_1034 (size=6118) 2024-11-20T23:38:44,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741849_1034 (size=6118) 2024-11-20T23:38:44,205 INFO [M:0;412a5e44fd2e:39051 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4c1adfdd0c284747bad11bb7abc94c61 2024-11-20T23:38:44,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:38:44,212 INFO [RS:0;412a5e44fd2e:34697 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:38:44,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34697-0x1015a9ccc830001, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:38:44,212 INFO [RS:0;412a5e44fd2e:34697 {}] regionserver.HRegionServer(1031): Exiting; stopping=412a5e44fd2e,34697,1732145893799; zookeeper connection closed. 2024-11-20T23:38:44,213 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@40e2e903 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@40e2e903 2024-11-20T23:38:44,213 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T23:38:44,226 DEBUG [M:0;412a5e44fd2e:39051 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6433568d9ad6436689e89a11f3a1a6f7 is 69, key is 412a5e44fd2e,34697,1732145893799/rs:state/1732145894269/Put/seqid=0 2024-11-20T23:38:44,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741850_1035 (size=5156) 2024-11-20T23:38:44,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741850_1035 (size=5156) 2024-11-20T23:38:44,232 INFO [M:0;412a5e44fd2e:39051 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6433568d9ad6436689e89a11f3a1a6f7 2024-11-20T23:38:44,253 DEBUG [M:0;412a5e44fd2e:39051 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/426beea7e05f46ccae65732e7c9c3cd1 is 52, key is load_balancer_on/state:d/1732145895142/Put/seqid=0 2024-11-20T23:38:44,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741851_1036 (size=5056) 2024-11-20T23:38:44,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741851_1036 (size=5056) 2024-11-20T23:38:44,258 INFO [M:0;412a5e44fd2e:39051 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/426beea7e05f46ccae65732e7c9c3cd1 2024-11-20T23:38:44,263 DEBUG [M:0;412a5e44fd2e:39051 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/badc55396184458683435057cfbb6322 as hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/badc55396184458683435057cfbb6322 2024-11-20T23:38:44,269 INFO [M:0;412a5e44fd2e:39051 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/badc55396184458683435057cfbb6322, entries=8, sequenceid=56, filesize=5.5 K 2024-11-20T23:38:44,270 DEBUG [M:0;412a5e44fd2e:39051 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4c1adfdd0c284747bad11bb7abc94c61 as hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4c1adfdd0c284747bad11bb7abc94c61 2024-11-20T23:38:44,276 INFO [M:0;412a5e44fd2e:39051 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4c1adfdd0c284747bad11bb7abc94c61, entries=6, sequenceid=56, filesize=6.0 K 2024-11-20T23:38:44,277 DEBUG [M:0;412a5e44fd2e:39051 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6433568d9ad6436689e89a11f3a1a6f7 as hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6433568d9ad6436689e89a11f3a1a6f7 2024-11-20T23:38:44,282 INFO [M:0;412a5e44fd2e:39051 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6433568d9ad6436689e89a11f3a1a6f7, entries=1, sequenceid=56, filesize=5.0 K 2024-11-20T23:38:44,283 DEBUG [M:0;412a5e44fd2e:39051 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/426beea7e05f46ccae65732e7c9c3cd1 as hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/426beea7e05f46ccae65732e7c9c3cd1 2024-11-20T23:38:44,288 INFO [M:0;412a5e44fd2e:39051 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/426beea7e05f46ccae65732e7c9c3cd1, entries=1, sequenceid=56, filesize=4.9 K 2024-11-20T23:38:44,289 INFO [M:0;412a5e44fd2e:39051 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=56, compaction requested=false 2024-11-20T23:38:44,291 INFO [M:0;412a5e44fd2e:39051 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
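The flush records above write each store's snapshot into a .tmp file and then commit it into the store directory ("Committing ... as ...", then "Added ..., entries=..., filesize=...") before the region closes. One standard way to implement such a commit step is a write-to-temp-then-rename, sketched below with only the public Hadoop FileSystem API; the helper name and paths are illustrative, not HBase's HRegionFileSystem.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch of a ".tmp then rename" commit like the flush records above.
// Paths and helper are hypothetical; this is not HRegionFileSystem itself.
public final class FlushCommitSketch {

  public static Path commitStoreFile(FileSystem fs, Path tmpFile, Path storeDir)
      throws IOException {
    Path committed = new Path(storeDir, tmpFile.getName());
    // A rename within one HDFS namespace is atomic, so readers never observe a
    // partially written store file: they see either no file or the complete one.
    if (!fs.rename(tmpFile, committed)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + committed);
    }
    return committed;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/example/data/store/.tmp/info/abcd1234"); // hypothetical
    Path storeDir = new Path("/example/data/store/info");          // hypothetical
    System.out.println("Committed to " + commitStoreFile(fs, tmp, storeDir));
  }
}
```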
2024-11-20T23:38:44,291 DEBUG [M:0;412a5e44fd2e:39051 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732145924141Disabling compacts and flushes for region at 1732145924141Disabling writes for close at 1732145924141Obtaining lock to block concurrent updates at 1732145924141Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732145924141Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732145924142 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732145924149 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732145924149Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732145924165 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732145924165Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732145924177 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732145924199 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732145924199Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732145924210 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732145924226 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732145924226Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732145924237 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732145924252 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732145924252Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ddaca13: reopening flushed file at 1732145924262 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d6b2dd4: reopening flushed file at 1732145924269 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4bbc8f6d: reopening flushed file at 1732145924276 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49f1ca8a: reopening flushed file at 1732145924282 (+6 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=56, compaction requested=false at 1732145924289 (+7 ms)Writing region close event to WAL at 1732145924291 (+2 ms)Closed at 1732145924291 2024-11-20T23:38:44,291 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,291 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,291 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,291 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,292 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:38:44,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741847_1031 (size=757) 2024-11-20T23:38:44,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36821 is added to blk_1073741847_1031 (size=757) 2024-11-20T23:38:44,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:44,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:45,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:45,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:45,697 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T23:38:45,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:45,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:46,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:46,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:46,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:38:46,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T23:38:46,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T23:38:46,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-20T23:38:47,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:47,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:47,614 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-20T23:38:48,150 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/WALs/412a5e44fd2e,39051,1732145893622/412a5e44fd2e%2C39051%2C1732145893622.1732145893943 after 4002ms 2024-11-20T23:38:48,150 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/WALs/412a5e44fd2e,39051,1732145893622/412a5e44fd2e%2C39051%2C1732145893622.1732145893943 to hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/oldWALs/412a5e44fd2e%2C39051%2C1732145893622.1732145893943 2024-11-20T23:38:48,153 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/MasterData/oldWALs/412a5e44fd2e%2C39051%2C1732145893622.1732145893943 to hdfs://localhost:46843/user/jenkins/test-data/58037ac3-ff6d-2273-a69a-eb6c86e6008d/oldWALs/412a5e44fd2e%2C39051%2C1732145893622.1732145893943$masterlocalwal$ 2024-11-20T23:38:48,153 INFO [M:0;412a5e44fd2e:39051 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T23:38:48,153 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T23:38:48,153 INFO [M:0;412a5e44fd2e:39051 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39051 2024-11-20T23:38:48,153 INFO [M:0;412a5e44fd2e:39051 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:38:48,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:38:48,374 INFO [M:0;412a5e44fd2e:39051 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:38:48,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39051-0x1015a9ccc830000, quorum=127.0.0.1:59058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:38:48,377 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@606a795b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:38:48,377 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78f99ac1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:38:48,377 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:38:48,377 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ddd02f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:38:48,377 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@4e60361d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,STOPPED} 2024-11-20T23:38:48,379 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T23:38:48,379 WARN [BP-1780958559-172.17.0.2-1732145890772 heartbeating to localhost/127.0.0.1:46843 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:38:48,379 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:38:48,379 WARN [BP-1780958559-172.17.0.2-1732145890772 heartbeating to localhost/127.0.0.1:46843 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1780958559-172.17.0.2-1732145890772 (Datanode Uuid a90ad449-2e14-4fae-ac44-0475925d20ac) service to localhost/127.0.0.1:46843 2024-11-20T23:38:48,379 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data3/current/BP-1780958559-172.17.0.2-1732145890772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:38:48,380 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data4/current/BP-1780958559-172.17.0.2-1732145890772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:38:48,380 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:38:48,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f2f2023{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:38:48,383 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ae70be3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:38:48,383 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:38:48,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7269a538{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:38:48,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24dbb8ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,STOPPED} 2024-11-20T23:38:48,384 WARN [BP-1780958559-172.17.0.2-1732145890772 heartbeating to localhost/127.0.0.1:46843 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:38:48,384 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
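The shutdown records that follow end with the test's ResourceChecker comparing live-thread counts before and after the run ("Thread=181 (was 156)") and dumping each "Potentially hanging thread" with its stack. The sketch below is a simplified, JDK-only version of that kind of before/after leak check; the class, demo thread and output format are assumptions, not HBase's ResourceChecker.

```java
import java.util.Map;
import java.util.Set;

// Simplified sketch of a before/after thread-leak check in the spirit of the
// ResourceChecker output below; names and the demo thread are illustrative.
public final class ThreadLeakCheckSketch {

  public static Set<Thread> snapshot() {
    // getAllStackTraces() returns a point-in-time map of live threads.
    return Thread.getAllStackTraces().keySet();
  }

  public static void report(Set<Thread> before) {
    Map<Thread, StackTraceElement[]> now = Thread.getAllStackTraces();
    System.out.printf("Thread=%d (was %d)%n", now.size(), before.size());
    for (Map.Entry<Thread, StackTraceElement[]> e : now.entrySet()) {
      Thread t = e.getKey();
      if (!before.contains(t)) {
        System.out.println("Potentially hanging thread: " + t.getName());
        for (StackTraceElement frame : e.getValue()) {
          System.out.println("    " + frame);
        }
      }
    }
  }

  public static void main(String[] args) {
    Set<Thread> before = snapshot();
    Thread leaky = new Thread(() -> {
      try {
        Thread.sleep(60_000L);
      } catch (InterruptedException ignored) {
        // demo thread, nothing to clean up
      }
    }, "leaky-example");
    leaky.setDaemon(true);
    leaky.start();
    report(before); // flags "leaky-example" as a potentially hanging thread
  }
}
```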
2024-11-20T23:38:48,385 WARN [BP-1780958559-172.17.0.2-1732145890772 heartbeating to localhost/127.0.0.1:46843 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1780958559-172.17.0.2-1732145890772 (Datanode Uuid 09b8d872-1fe7-45a4-9f26-fbd4a3e235af) service to localhost/127.0.0.1:46843 2024-11-20T23:38:48,385 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:38:48,385 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data1/current/BP-1780958559-172.17.0.2-1732145890772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:38:48,386 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/cluster_a16209c4-e13b-0381-16f0-b389a134d96c/data/data2/current/BP-1780958559-172.17.0.2-1732145890772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:38:48,386 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:38:48,392 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@71718145{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T23:38:48,393 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37ba1ac4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:38:48,393 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:38:48,393 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bf7054a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:38:48,393 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45628471{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir/,STOPPED} 2024-11-20T23:38:48,399 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T23:38:48,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T23:38:48,429 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 156) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46843 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46843 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46843 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46843 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46843 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46843 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:46843 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46843 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=294 (was 319), ProcessCount=11 (was 11), AvailableMemoryMB=257 (was 264) 2024-11-20T23:38:48,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:48,437 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=294, ProcessCount=11, AvailableMemoryMB=257 2024-11-20T23:38:48,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T23:38:48,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.log.dir so I do NOT create it in target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4 2024-11-20T23:38:48,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d0dba737-5a12-2d88-1055-df5d9f29bbf9/hadoop.tmp.dir so I do NOT create it in target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4 2024-11-20T23:38:48,437 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/cluster_bc7d60bd-7218-eab4-2f5f-d3654a22a956, deleteOnExit=true 2024-11-20T23:38:48,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T23:38:48,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/test.cache.data in system properties and HBase conf 2024-11-20T23:38:48,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T23:38:48,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/hadoop.log.dir in system properties and HBase conf 2024-11-20T23:38:48,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T23:38:48,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T23:38:48,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T23:38:48,438 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T23:38:48,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:38:48,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:38:48,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T23:38:48,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:38:48,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T23:38:48,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T23:38:48,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:38:48,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:38:48,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T23:38:48,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/nfs.dump.dir in system properties and HBase conf 2024-11-20T23:38:48,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/java.io.tmpdir in system properties and HBase conf 2024-11-20T23:38:48,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:38:48,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T23:38:48,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T23:38:48,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:48,454 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:38:48,779 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:38:48,784 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:38:48,785 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:38:48,785 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:38:48,785 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:38:48,786 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:38:48,786 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fb33a9d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:38:48,787 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1af676f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:38:48,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@188d3e33{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/java.io.tmpdir/jetty-localhost-38269-hadoop-hdfs-3_4_1-tests_jar-_-any-7906861516515725195/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T23:38:48,913 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2aaa4790{HTTP/1.1, (http/1.1)}{localhost:38269} 2024-11-20T23:38:48,913 INFO [Time-limited test {}] server.Server(415): Started @199113ms 2024-11-20T23:38:48,929 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:38:49,424 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:38:49,432 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:38:49,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:49,436 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:38:49,436 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:38:49,437 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T23:38:49,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@507832d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:38:49,438 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9b25e94{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:38:49,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:49,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5aa33ca4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/java.io.tmpdir/jetty-localhost-39945-hadoop-hdfs-3_4_1-tests_jar-_-any-9393328884816364801/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:38:49,549 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3be31a0b{HTTP/1.1, (http/1.1)}{localhost:39945} 2024-11-20T23:38:49,549 INFO [Time-limited test {}] server.Server(415): Started @199749ms 2024-11-20T23:38:49,550 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:38:49,586 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:38:49,590 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:38:49,592 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:38:49,592 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:38:49,592 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T23:38:49,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@311facd9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:38:49,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@555a4a92{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:38:49,709 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a3c6b7a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/java.io.tmpdir/jetty-localhost-38193-hadoop-hdfs-3_4_1-tests_jar-_-any-12889806425102776407/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:38:49,710 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1beefc80{HTTP/1.1, (http/1.1)}{localhost:38193} 2024-11-20T23:38:49,710 INFO [Time-limited test {}] server.Server(415): Started @199910ms 2024-11-20T23:38:49,711 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-20T23:38:50,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:50,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:50,612 WARN [Thread-1669 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/cluster_bc7d60bd-7218-eab4-2f5f-d3654a22a956/data/data1/current/BP-1751864290-172.17.0.2-1732145928466/current, will proceed with Du for space computation calculation, 2024-11-20T23:38:50,612 WARN [Thread-1670 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/cluster_bc7d60bd-7218-eab4-2f5f-d3654a22a956/data/data2/current/BP-1751864290-172.17.0.2-1732145928466/current, will proceed with Du for space computation calculation, 2024-11-20T23:38:50,631 WARN [Thread-1633 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T23:38:50,633 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa4fb1513ae31bd1a with lease ID 0xe778ec5a2c11b311: Processing first storage report for DS-1b120614-28af-4703-9d49-f1d1c6be64ef from datanode DatanodeRegistration(127.0.0.1:42979, datanodeUuid=1d6dee23-25e5-4fec-9bc9-a82bce7e1309, infoPort=41817, infoSecurePort=0, ipcPort=45979, storageInfo=lv=-57;cid=testClusterID;nsid=570364039;c=1732145928466) 2024-11-20T23:38:50,633 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4fb1513ae31bd1a with lease ID 0xe778ec5a2c11b311: from storage DS-1b120614-28af-4703-9d49-f1d1c6be64ef node DatanodeRegistration(127.0.0.1:42979, datanodeUuid=1d6dee23-25e5-4fec-9bc9-a82bce7e1309, infoPort=41817, infoSecurePort=0, ipcPort=45979, storageInfo=lv=-57;cid=testClusterID;nsid=570364039;c=1732145928466), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:38:50,633 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa4fb1513ae31bd1a with lease ID 0xe778ec5a2c11b311: Processing first storage report for DS-5cd02a97-9538-4ec3-bf89-9d57434e0da4 from datanode DatanodeRegistration(127.0.0.1:42979, datanodeUuid=1d6dee23-25e5-4fec-9bc9-a82bce7e1309, infoPort=41817, infoSecurePort=0, ipcPort=45979, storageInfo=lv=-57;cid=testClusterID;nsid=570364039;c=1732145928466) 2024-11-20T23:38:50,633 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4fb1513ae31bd1a with lease ID 0xe778ec5a2c11b311: from storage DS-5cd02a97-9538-4ec3-bf89-9d57434e0da4 node DatanodeRegistration(127.0.0.1:42979, datanodeUuid=1d6dee23-25e5-4fec-9bc9-a82bce7e1309, infoPort=41817, infoSecurePort=0, ipcPort=45979, storageInfo=lv=-57;cid=testClusterID;nsid=570364039;c=1732145928466), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:38:50,846 WARN [Thread-1680 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/cluster_bc7d60bd-7218-eab4-2f5f-d3654a22a956/data/data3/current/BP-1751864290-172.17.0.2-1732145928466/current, will proceed with Du for space computation calculation, 2024-11-20T23:38:50,847 WARN [Thread-1681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/cluster_bc7d60bd-7218-eab4-2f5f-d3654a22a956/data/data4/current/BP-1751864290-172.17.0.2-1732145928466/current, will proceed with Du for space computation calculation, 2024-11-20T23:38:50,869 WARN [Thread-1656 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T23:38:50,871 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5b50588b4490bb with lease ID 0xe778ec5a2c11b312: Processing first storage report for DS-d54dc545-26f8-4118-a8ad-6be637aa05b3 from datanode DatanodeRegistration(127.0.0.1:37437, datanodeUuid=4fce3929-157c-4ad0-acc8-229f9005b9de, infoPort=46487, infoSecurePort=0, ipcPort=39397, storageInfo=lv=-57;cid=testClusterID;nsid=570364039;c=1732145928466) 2024-11-20T23:38:50,871 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf5b50588b4490bb with lease ID 0xe778ec5a2c11b312: from storage DS-d54dc545-26f8-4118-a8ad-6be637aa05b3 node DatanodeRegistration(127.0.0.1:37437, datanodeUuid=4fce3929-157c-4ad0-acc8-229f9005b9de, infoPort=46487, infoSecurePort=0, ipcPort=39397, storageInfo=lv=-57;cid=testClusterID;nsid=570364039;c=1732145928466), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T23:38:50,871 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5b50588b4490bb with lease ID 0xe778ec5a2c11b312: Processing first storage report for DS-52beffbe-2e17-4808-b657-158e8f6b1aaa from datanode DatanodeRegistration(127.0.0.1:37437, datanodeUuid=4fce3929-157c-4ad0-acc8-229f9005b9de, infoPort=46487, infoSecurePort=0, ipcPort=39397, storageInfo=lv=-57;cid=testClusterID;nsid=570364039;c=1732145928466) 2024-11-20T23:38:50,871 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf5b50588b4490bb with lease ID 0xe778ec5a2c11b312: from storage DS-52beffbe-2e17-4808-b657-158e8f6b1aaa node DatanodeRegistration(127.0.0.1:37437, datanodeUuid=4fce3929-157c-4ad0-acc8-229f9005b9de, infoPort=46487, infoSecurePort=0, ipcPort=39397, storageInfo=lv=-57;cid=testClusterID;nsid=570364039;c=1732145928466), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:38:50,952 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4 2024-11-20T23:38:50,955 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/cluster_bc7d60bd-7218-eab4-2f5f-d3654a22a956/zookeeper_0, clientPort=65104, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/cluster_bc7d60bd-7218-eab4-2f5f-d3654a22a956/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/cluster_bc7d60bd-7218-eab4-2f5f-d3654a22a956/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T23:38:50,956 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=65104 2024-11-20T23:38:50,956 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:50,958 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:50,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:38:50,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:38:50,971 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca with version=8 2024-11-20T23:38:50,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/hbase-staging 2024-11-20T23:38:50,973 INFO [Time-limited test {}] client.ConnectionUtils(128): master/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:38:50,973 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:38:50,973 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:38:50,973 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:38:50,973 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:38:50,973 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:38:50,973 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T23:38:50,973 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:38:50,974 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39945 2024-11-20T23:38:50,975 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39945 connecting to ZooKeeper ensemble=127.0.0.1:65104 2024-11-20T23:38:51,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:399450x0, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:38:51,043 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39945-0x1015a9d5e760000 connected 2024-11-20T23:38:51,127 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:51,129 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:51,131 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:38:51,131 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca, hbase.cluster.distributed=false 2024-11-20T23:38:51,133 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:38:51,133 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39945 2024-11-20T23:38:51,134 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39945 2024-11-20T23:38:51,134 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39945 2024-11-20T23:38:51,134 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39945 2024-11-20T23:38:51,135 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39945 2024-11-20T23:38:51,155 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:38:51,155 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:38:51,155 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:38:51,155 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:38:51,155 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:38:51,155 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:38:51,155 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T23:38:51,155 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:38:51,156 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43969 2024-11-20T23:38:51,157 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43969 connecting to ZooKeeper ensemble=127.0.0.1:65104 2024-11-20T23:38:51,158 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:51,159 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:51,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:439690x0, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:38:51,169 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:439690x0, quorum=127.0.0.1:65104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:38:51,169 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43969-0x1015a9d5e760001 connected 2024-11-20T23:38:51,170 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T23:38:51,170 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T23:38:51,171 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T23:38:51,172 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:38:51,172 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43969 2024-11-20T23:38:51,172 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43969 2024-11-20T23:38:51,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43969 2024-11-20T23:38:51,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43969 2024-11-20T23:38:51,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43969 2024-11-20T23:38:51,187 DEBUG [M:0;412a5e44fd2e:39945 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;412a5e44fd2e:39945 2024-11-20T23:38:51,187 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/412a5e44fd2e,39945,1732145930972 2024-11-20T23:38:51,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:38:51,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:38:51,196 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/412a5e44fd2e,39945,1732145930972 2024-11-20T23:38:51,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T23:38:51,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:51,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:51,207 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T23:38:51,208 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/412a5e44fd2e,39945,1732145930972 from backup master directory 2024-11-20T23:38:51,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/412a5e44fd2e,39945,1732145930972 2024-11-20T23:38:51,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:38:51,217 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
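The watcher traffic above ("Set watcher on znode that does not yet exist", followed later by NodeCreated/NodeDeleted events) is plain ZooKeeper watch semantics: an exists() call on a missing path returns null but still registers a one-shot watch. A minimal Java sketch of that pattern follows, using the stock ZooKeeper client rather than HBase's ZKUtil; the quorum address and the fact that the same process creates the znode are assumptions made only so the example is self-contained and runnable.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class WatchMissingZnode {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    CountDownLatch created = new CountDownLatch(1);

    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
      // Session-level events (the "type=None, state=SyncConnected" lines in the log).
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    // Make sure the persistent parent exists so the sketch runs against a fresh ensemble.
    if (zk.exists("/hbase", false) == null) {
      zk.create("/hbase", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    }

    // exists() on a missing path returns null but still registers a one-shot watch,
    // which is what "Set watcher on znode that does not yet exist" refers to.
    zk.exists("/hbase/running", event -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        created.countDown();
      }
    });

    // Later, another process (here: ourselves, for runnability) creates the znode
    // and the registered watch fires exactly once with NodeCreated.
    zk.create("/hbase/running", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    created.await();
    zk.close();
  }
}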
2024-11-20T23:38:51,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:38:51,217 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=412a5e44fd2e,39945,1732145930972 2024-11-20T23:38:51,221 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/hbase.id] with ID: 4a95637a-8846-4bcd-8fac-c99b13c02b8d 2024-11-20T23:38:51,221 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/.tmp/hbase.id 2024-11-20T23:38:51,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:38:51,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:38:51,230 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/.tmp/hbase.id]:[hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/hbase.id] 2024-11-20T23:38:51,243 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:51,243 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T23:38:51,244 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
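The cluster ID handling above (write hbase.id to a .tmp location, then move it to its final path) is the standard write-then-rename publication pattern on HDFS, where a rename within one namespace is an atomic metadata operation. Below is a hedged sketch of that pattern using the public Hadoop FileSystem API; the fs.defaultFS value and both paths are placeholders, and this is not the FSUtils implementation itself.

import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFile {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:8020");   // placeholder, not from the log
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/hbase/.tmp/hbase.id");
    Path dst = new Path("/hbase/hbase.id");

    // Write the new cluster ID to a temporary file first...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // ...then publish it by renaming into place; readers never see a half-written file.
    if (!fs.rename(tmp, dst)) {
      throw new IllegalStateException("rename failed: " + tmp + " -> " + dst);
    }
    fs.close();
  }
}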
2024-11-20T23:38:51,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:51,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:51,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:38:51,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:38:51,265 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T23:38:51,265 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T23:38:51,266 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:38:51,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:38:51,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:38:51,284 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store 2024-11-20T23:38:51,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:38:51,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:38:51,292 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:38:51,292 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:38:51,292 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:38:51,292 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:38:51,292 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T23:38:51,292 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:38:51,292 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
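The master:store descriptor logged above (families info, proc, rs and state with per-family VERSIONS, BLOOMFILTER, IN_MEMORY, BLOCKSIZE and DATA_BLOCK_ENCODING settings) maps directly onto HBase's public descriptor builders. The sketch below shows how an equivalent pair of column families could be declared with TableDescriptorBuilder and ColumnFamilyDescriptorBuilder; the table name demo:store is hypothetical, and MasterRegion assembles its descriptor internally rather than through client code like this.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreLikeDescriptor {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo:store"))   // hypothetical name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8KB
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                    // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
            .build())
        .build();
  }
}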
2024-11-20T23:38:51,292 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732145931292Disabling compacts and flushes for region at 1732145931292Disabling writes for close at 1732145931292Writing region close event to WAL at 1732145931292Closed at 1732145931292 2024-11-20T23:38:51,293 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/.initializing 2024-11-20T23:38:51,293 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/WALs/412a5e44fd2e,39945,1732145930972 2024-11-20T23:38:51,296 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C39945%2C1732145930972, suffix=, logDir=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/WALs/412a5e44fd2e,39945,1732145930972, archiveDir=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/oldWALs, maxLogs=10 2024-11-20T23:38:51,297 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C39945%2C1732145930972.1732145931296 2024-11-20T23:38:51,302 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/WALs/412a5e44fd2e,39945,1732145930972/412a5e44fd2e%2C39945%2C1732145930972.1732145931296 2024-11-20T23:38:51,303 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41817:41817),(127.0.0.1/127.0.0.1:46487:46487)] 2024-11-20T23:38:51,304 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:38:51,304 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:38:51,304 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:51,304 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:51,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:51,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T23:38:51,310 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:51,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:51,311 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:51,313 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T23:38:51,313 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:51,313 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:38:51,314 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:51,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T23:38:51,316 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:51,316 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:38:51,317 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:51,319 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T23:38:51,319 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:51,320 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:38:51,320 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:51,322 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:51,322 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:51,326 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:51,326 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:51,328 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T23:38:51,330 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:38:51,333 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:38:51,333 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=864250, jitterRate=0.09895190596580505}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T23:38:51,334 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732145931305Initializing all the Stores at 1732145931305Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145931306 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145931308 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145931308Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145931308Cleaning up temporary data from old regions at 1732145931327 (+19 ms)Region opened successfully at 1732145931334 (+7 ms) 2024-11-20T23:38:51,335 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T23:38:51,339 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57410400, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:38:51,340 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T23:38:51,341 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T23:38:51,341 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T23:38:51,341 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T23:38:51,342 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T23:38:51,342 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T23:38:51,342 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T23:38:51,345 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T23:38:51,346 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T23:38:51,358 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T23:38:51,359 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T23:38:51,359 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T23:38:51,369 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T23:38:51,369 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T23:38:51,370 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T23:38:51,379 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T23:38:51,380 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T23:38:51,390 DEBUG 
[master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T23:38:51,392 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T23:38:51,400 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T23:38:51,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:38:51,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:38:51,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:51,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:51,411 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=412a5e44fd2e,39945,1732145930972, sessionid=0x1015a9d5e760000, setting cluster-up flag (Was=false) 2024-11-20T23:38:51,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:51,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:51,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:51,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:51,464 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T23:38:51,465 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,39945,1732145930972 2024-11-20T23:38:51,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:51,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:51,723 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T23:38:51,725 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,39945,1732145930972 2024-11-20T23:38:51,727 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T23:38:51,729 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T23:38:51,729 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T23:38:51,730 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
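The two WARN stack traces above come from WAL lease recovery probing DistributedFileSystem.isFileClosed() through a FileSystem whose underlying DFSClient had already been closed during test shutdown, hence the "Filesystem closed" IOException. Conceptually, lease recovery asks the NameNode to take over the lease of a file that a (possibly dead) writer still holds, then polls until the file's last block is finalized. A rough sketch under those assumptions, not HBase's RecoverLeaseFSUtils:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void recover(Configuration conf, Path wal) throws Exception {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return;                                    // lease recovery is HDFS-specific
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    boolean recovered = dfs.recoverLease(wal);   // may succeed immediately
    while (!recovered && !dfs.isFileClosed(wal)) {
      Thread.sleep(1000);                        // wait for block recovery, then retry
      recovered = dfs.recoverLease(wal);
    }
  }
}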
2024-11-20T23:38:51,730 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 412a5e44fd2e,39945,1732145930972 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T23:38:51,732 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:38:51,732 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:38:51,732 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:38:51,732 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:38:51,732 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/412a5e44fd2e:0, corePoolSize=10, maxPoolSize=10 2024-11-20T23:38:51,732 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:51,732 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:38:51,732 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:51,734 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732145961734 2024-11-20T23:38:51,734 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T23:38:51,734 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T23:38:51,734 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T23:38:51,734 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T23:38:51,734 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T23:38:51,734 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T23:38:51,734 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:38:51,734 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:51,734 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T23:38:51,735 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T23:38:51,735 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T23:38:51,735 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T23:38:51,735 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T23:38:51,735 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T23:38:51,736 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:51,736 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145931735,5,FailOnTimeoutGroup] 2024-11-20T23:38:51,736 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T23:38:51,740 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145931736,5,FailOnTimeoutGroup] 2024-11-20T23:38:51,740 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:51,740 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T23:38:51,740 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:51,740 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:51,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:38:51,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:38:51,748 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T23:38:51,748 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca 2024-11-20T23:38:51,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:38:51,756 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:38:51,757 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:38:51,759 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:38:51,760 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:38:51,760 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:51,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:51,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:38:51,763 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:38:51,763 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:51,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:51,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:38:51,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:38:51,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:51,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:51,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:38:51,768 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:38:51,768 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:51,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:51,769 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:38:51,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740 2024-11-20T23:38:51,771 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740 2024-11-20T23:38:51,772 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:38:51,772 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:38:51,772 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T23:38:51,773 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:38:51,775 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(746): ClusterId : 4a95637a-8846-4bcd-8fac-c99b13c02b8d 2024-11-20T23:38:51,775 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T23:38:51,775 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:38:51,776 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759146, jitterRate=-0.03469665348529816}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:38:51,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732145931757Initializing all the Stores at 1732145931758 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145931758Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145931759 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145931759Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145931759Cleaning up temporary data from old regions at 1732145931772 (+13 ms)Region opened successfully at 1732145931776 (+4 ms) 2024-11-20T23:38:51,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
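The CompactionConfiguration lines above (ratio 1.200000, minFilesToCompact:3, maxFilesToCompact:10) govern which groups of store files are eligible for a minor compaction; the core idea is that no single file in a candidate set may dwarf the combined size of the others. The following toy check illustrates that ratio rule only; it is not the ExploringCompactionPolicy code referenced in the log.

import java.util.List;

public class RatioCheck {
  static boolean acceptable(List<Long> sizes, double ratio, int minFiles, int maxFiles) {
    if (sizes.size() < minFiles || sizes.size() > maxFiles) {
      return false;
    }
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      // Each file must be no bigger than ratio * (sum of the other files in the set).
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(acceptable(List.of(10L, 12L, 11L), 1.2, 3, 10));  // true
    System.out.println(acceptable(List.of(100L, 5L, 6L), 1.2, 3, 10));   // false: 100 > 1.2 * 11
  }
}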
2024-11-20T23:38:51,777 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:38:51,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:38:51,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T23:38:51,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:38:51,777 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T23:38:51,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732145931776Disabling compacts and flushes for region at 1732145931776Disabling writes for close at 1732145931777 (+1 ms)Writing region close event to WAL at 1732145931777Closed at 1732145931777 2024-11-20T23:38:51,778 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:38:51,778 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T23:38:51,778 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T23:38:51,780 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:38:51,781 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T23:38:51,931 WARN [412a5e44fd2e:39945 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
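The CompactionConfiguration(183) entries above print the effective compaction tuning for each column family of region 1588230740 (minCompactSize 128 MB, min/max files to compact 3/10, ratio 1.2, off-peak ratio 5.0). As a rough illustration only, the Java sketch below shows how such values are commonly supplied through the Hadoop Configuration that HBase reads when a store is opened; the hbase.hstore.* key names are the usual documented settings and are an assumption here, not something printed in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch: maps the values logged by CompactionConfiguration(183)
// onto the commonly used hbase.hstore.* configuration keys (assumed names).
public class CompactionConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
    return conf;
  }
}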
2024-11-20T23:38:52,030 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T23:38:52,030 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T23:38:52,113 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T23:38:52,114 DEBUG [RS:0;412a5e44fd2e:43969 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fd392b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:38:52,130 DEBUG [RS:0;412a5e44fd2e:43969 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;412a5e44fd2e:43969 2024-11-20T23:38:52,130 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T23:38:52,130 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T23:38:52,130 DEBUG [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-20T23:38:52,131 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(2659): reportForDuty to master=412a5e44fd2e,39945,1732145930972 with port=43969, startcode=1732145931154 2024-11-20T23:38:52,131 DEBUG [RS:0;412a5e44fd2e:43969 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T23:38:52,133 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53951, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T23:38:52,134 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39945 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 412a5e44fd2e,43969,1732145931154 2024-11-20T23:38:52,134 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39945 {}] master.ServerManager(517): Registering regionserver=412a5e44fd2e,43969,1732145931154 2024-11-20T23:38:52,135 DEBUG [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca 2024-11-20T23:38:52,135 DEBUG [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33281 2024-11-20T23:38:52,136 DEBUG [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T23:38:52,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:38:52,210 DEBUG [RS:0;412a5e44fd2e:43969 {}] zookeeper.ZKUtil(111): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/412a5e44fd2e,43969,1732145931154 2024-11-20T23:38:52,211 WARN [RS:0;412a5e44fd2e:43969 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
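The ZKWatcher/ZKUtil entries above record the region server publishing its ephemeral znode under /hbase/rs on the quorum at 127.0.0.1:65104. Purely as a sketch of what that znode layout looks like from a client's point of view, the snippet below lists the registered region servers with the plain ZooKeeper API; the 30-second session timeout and the no-op watcher are arbitrary illustration choices, not values taken from this log.

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

// Sketch only: read the ephemeral region-server znodes that the log above
// shows being created under /hbase/rs.
public class ListRegionServersSketch {
  public static void main(String[] args) throws Exception {
    // Quorum and base znode taken from the ZKWatcher lines above; the
    // session timeout and empty watcher are illustrative assumptions.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:65104", 30_000, event -> { });
    try {
      List<String> servers = zk.getChildren("/hbase/rs", false);
      servers.forEach(System.out::println); // e.g. 412a5e44fd2e,43969,1732145931154
    } finally {
      zk.close();
    }
  }
}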
2024-11-20T23:38:52,211 INFO [RS:0;412a5e44fd2e:43969 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:38:52,211 DEBUG [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154 2024-11-20T23:38:52,211 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [412a5e44fd2e,43969,1732145931154] 2024-11-20T23:38:52,216 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T23:38:52,218 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T23:38:52,219 INFO [RS:0;412a5e44fd2e:43969 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T23:38:52,219 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,219 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T23:38:52,220 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T23:38:52,220 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,221 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:52,221 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:52,221 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:52,221 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:52,221 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:52,221 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:38:52,221 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:52,221 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:52,221 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/412a5e44fd2e:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T23:38:52,221 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:52,221 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:52,222 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:38:52,222 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:38:52,222 DEBUG [RS:0;412a5e44fd2e:43969 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:38:52,222 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,222 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,222 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,222 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,222 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,222 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,43969,1732145931154-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:38:52,240 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T23:38:52,241 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,43969,1732145931154-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,241 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,241 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.Replication(171): 412a5e44fd2e,43969,1732145931154 started 2024-11-20T23:38:52,257 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T23:38:52,257 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(1482): Serving as 412a5e44fd2e,43969,1732145931154, RpcServer on 412a5e44fd2e/172.17.0.2:43969, sessionid=0x1015a9d5e760001 2024-11-20T23:38:52,257 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T23:38:52,257 DEBUG [RS:0;412a5e44fd2e:43969 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 412a5e44fd2e,43969,1732145931154 2024-11-20T23:38:52,258 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,43969,1732145931154' 2024-11-20T23:38:52,258 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T23:38:52,258 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T23:38:52,259 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T23:38:52,259 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T23:38:52,259 DEBUG [RS:0;412a5e44fd2e:43969 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 412a5e44fd2e,43969,1732145931154 2024-11-20T23:38:52,259 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,43969,1732145931154' 2024-11-20T23:38:52,259 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T23:38:52,259 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T23:38:52,259 DEBUG [RS:0;412a5e44fd2e:43969 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T23:38:52,259 INFO [RS:0;412a5e44fd2e:43969 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T23:38:52,259 INFO [RS:0;412a5e44fd2e:43969 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-20T23:38:52,362 INFO [RS:0;412a5e44fd2e:43969 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C43969%2C1732145931154, suffix=, logDir=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154, archiveDir=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/oldWALs, maxLogs=32 2024-11-20T23:38:52,362 INFO [RS:0;412a5e44fd2e:43969 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C43969%2C1732145931154.1732145932362 2024-11-20T23:38:52,369 INFO [RS:0;412a5e44fd2e:43969 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154/412a5e44fd2e%2C43969%2C1732145931154.1732145932362 2024-11-20T23:38:52,392 DEBUG [RS:0;412a5e44fd2e:43969 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41817:41817),(127.0.0.1/127.0.0.1:46487:46487)] 2024-11-20T23:38:52,431 DEBUG [412a5e44fd2e:39945 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T23:38:52,432 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=412a5e44fd2e,43969,1732145931154 2024-11-20T23:38:52,437 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,43969,1732145931154, state=OPENING 2024-11-20T23:38:52,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:52,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:52,448 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T23:38:52,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:52,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:38:52,460 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:38:52,460 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:38:52,460 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,43969,1732145931154}] 2024-11-20T23:38:52,460 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:38:52,502 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T23:38:52,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:52,613 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T23:38:52,615 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58653, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T23:38:52,618 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T23:38:52,619 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:38:52,621 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C43969%2C1732145931154.meta, suffix=.meta, logDir=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154, archiveDir=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/oldWALs, maxLogs=32 2024-11-20T23:38:52,621 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C43969%2C1732145931154.meta.1732145932621.meta 2024-11-20T23:38:52,627 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154/412a5e44fd2e%2C43969%2C1732145931154.meta.1732145932621.meta 2024-11-20T23:38:52,632 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41817:41817),(127.0.0.1/127.0.0.1:46487:46487)] 
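The AbstractFSWAL(613) entries above report the WAL geometry in use (FSHLogProvider, blocksize 256 MB, rollsize 128 MB, maxLogs 32). The sketch below shows the configuration keys those values usually come from; the hbase.wal.provider / hbase.regionserver.* key names are the standard settings and are stated here as an assumption rather than read out of this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: likely source of the WAL settings printed by AbstractFSWAL(613).
public class WalConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");                          // FSHLogProvider
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = blocksize * 0.5 = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32
    return conf;
  }
}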
2024-11-20T23:38:52,633 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:38:52,634 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T23:38:52,634 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T23:38:52,634 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T23:38:52,634 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T23:38:52,634 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:38:52,634 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T23:38:52,634 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T23:38:52,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:38:52,636 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:38:52,636 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:52,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:52,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:38:52,638 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:38:52,638 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:52,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:52,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:38:52,639 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:38:52,639 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:52,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:52,640 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:38:52,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:38:52,640 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:38:52,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:38:52,641 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:38:52,642 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740 2024-11-20T23:38:52,643 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740 2024-11-20T23:38:52,644 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:38:52,644 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:38:52,644 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
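FlushLargeStoresPolicy(65) above falls back to memstore-flush-size divided by the number of families (16.0 M) because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta table descriptor. For a user table, that bound could be set on the descriptor as sketched below; the table name and the 64 MB value are arbitrary illustration choices.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Sketch: setting the per-column-family flush lower bound that the
// FlushLargeStoresPolicy line above reports as unset.
public class PerFamilyFlushBoundSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("someTable")) // hypothetical table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(64L * 1024 * 1024))                         // 64 MB, arbitrary
        .build();
  }
}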
2024-11-20T23:38:52,646 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:38:52,646 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770096, jitterRate=-0.02077263593673706}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:38:52,646 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T23:38:52,647 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732145932634Writing region info on filesystem at 1732145932634Initializing all the Stores at 1732145932635 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145932635Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145932635Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145932635Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145932635Cleaning up temporary data from old regions at 1732145932644 (+9 ms)Running coprocessor post-open hooks at 1732145932646 (+2 ms)Region opened successfully at 1732145932647 (+1 ms) 2024-11-20T23:38:52,648 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732145932613 2024-11-20T23:38:52,651 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T23:38:52,651 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T23:38:52,651 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=412a5e44fd2e,43969,1732145931154 2024-11-20T23:38:52,652 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,43969,1732145931154, state=OPEN 2024-11-20T23:38:52,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:38:52,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:38:52,688 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=412a5e44fd2e,43969,1732145931154 2024-11-20T23:38:52,688 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:38:52,688 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:38:52,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T23:38:52,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,43969,1732145931154 in 228 msec 2024-11-20T23:38:52,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T23:38:52,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 914 msec 2024-11-20T23:38:52,695 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:38:52,695 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T23:38:52,697 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:38:52,697 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,43969,1732145931154, seqNum=-1] 2024-11-20T23:38:52,697 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:38:52,699 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55837, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:38:52,705 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 975 msec 2024-11-20T23:38:52,705 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732145932705, completionTime=-1 2024-11-20T23:38:52,705 INFO 
[master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T23:38:52,705 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T23:38:52,707 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T23:38:52,707 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732145992707 2024-11-20T23:38:52,707 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732146052707 2024-11-20T23:38:52,707 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-20T23:38:52,708 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,39945,1732145930972-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,708 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,39945,1732145930972-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,708 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,39945,1732145930972-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,708 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-412a5e44fd2e:39945, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,708 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,709 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,710 DEBUG [master/412a5e44fd2e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T23:38:52,712 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.495sec 2024-11-20T23:38:52,712 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T23:38:52,712 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T23:38:52,712 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T23:38:52,712 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-20T23:38:52,712 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T23:38:52,712 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,39945,1732145930972-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:38:52,712 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,39945,1732145930972-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T23:38:52,715 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T23:38:52,715 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T23:38:52,715 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,39945,1732145930972-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:38:52,776 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68c49893, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:38:52,776 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 412a5e44fd2e,39945,-1 for getting cluster id 2024-11-20T23:38:52,776 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T23:38:52,778 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4a95637a-8846-4bcd-8fac-c99b13c02b8d' 2024-11-20T23:38:52,779 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T23:38:52,779 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4a95637a-8846-4bcd-8fac-c99b13c02b8d" 2024-11-20T23:38:52,779 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71cf5c3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:38:52,780 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [412a5e44fd2e,39945,-1] 2024-11-20T23:38:52,780 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T23:38:52,780 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:38:52,782 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32970, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T23:38:52,783 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@380e5c32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:38:52,784 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:38:52,785 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,43969,1732145931154, seqNum=-1] 2024-11-20T23:38:52,786 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:38:52,787 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36278, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:38:52,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=412a5e44fd2e,39945,1732145930972 2024-11-20T23:38:52,789 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:38:52,792 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T23:38:52,792 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T23:38:52,793 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 412a5e44fd2e,39945,1732145930972 2024-11-20T23:38:52,793 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@779f45a 2024-11-20T23:38:52,793 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T23:38:52,794 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32974, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T23:38:52,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T23:38:52,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-20T23:38:52,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-20T23:38:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T23:38:52,798 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-11-20T23:38:52,798 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T23:38:52,798 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4
2024-11-20T23:38:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-20T23:38:52,799 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-20T23:38:52,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741835_1011 (size=405)
2024-11-20T23:38:52,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741835_1011 (size=405)
2024-11-20T23:38:52,807 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cd4c26dcc706ee34776aa8d52b048c3d, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca
2024-11-20T23:38:52,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741836_1012 (size=88)
2024-11-20T23:38:52,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741836_1012 (size=88)
2024-11-20T23:38:52,814 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T23:38:52,814 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing cd4c26dcc706ee34776aa8d52b048c3d, disabling compactions & flushes
2024-11-20T23:38:52,814 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:38:52,814 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:38:52,814 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d. after waiting 0 ms
2024-11-20T23:38:52,814 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:38:52,814 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:38:52,814 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for cd4c26dcc706ee34776aa8d52b048c3d: Waiting for close lock at 1732145932814Disabling compacts and flushes for region at 1732145932814Disabling writes for close at 1732145932814Writing region close event to WAL at 1732145932814Closed at 1732145932814
2024-11-20T23:38:52,816 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META
2024-11-20T23:38:52,816 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732145932816"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732145932816"}]},"ts":"1732145932816"}
2024-11-20T23:38:52,819 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
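At this point the CreateTableProcedure (pid=4) has written the new region's row to hbase:meta but the region is not yet assigned, and the client keeps asking the master whether pid=4 is finished ("Checking to see if procedure is done pid=4"). A purely illustrative, hypothetical client-side wait loop using only public API (the `connection` parameter and timeout are placeholders, not from the test) might look like:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class WaitForTableSketch {
  // Hypothetical: block until the master reports the table available, or give up after a deadline.
  static void waitUntilAvailable(Connection connection, String table, long timeoutMs) throws Exception {
    TableName name = TableName.valueOf(table);
    long deadline = System.currentTimeMillis() + timeoutMs;
    try (Admin admin = connection.getAdmin()) {
      while (!admin.isTableAvailable(name)) {   // polls the master for table/region availability
        if (System.currentTimeMillis() > deadline) {
          throw new IllegalStateException("Table " + table + " not available within " + timeoutMs + " ms");
        }
        TimeUnit.MILLISECONDS.sleep(100);
      }
    }
  }
}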
2024-11-20T23:38:52,820 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-20T23:38:52,820 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732145932820"}]},"ts":"1732145932820"}
2024-11-20T23:38:52,822 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta
2024-11-20T23:38:52,823 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cd4c26dcc706ee34776aa8d52b048c3d, ASSIGN}]
2024-11-20T23:38:52,824 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cd4c26dcc706ee34776aa8d52b048c3d, ASSIGN
2024-11-20T23:38:52,825 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cd4c26dcc706ee34776aa8d52b048c3d, ASSIGN; state=OFFLINE, location=412a5e44fd2e,43969,1732145931154; forceNewPlan=false, retain=false
2024-11-20T23:38:52,976 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cd4c26dcc706ee34776aa8d52b048c3d, regionState=OPENING, regionLocation=412a5e44fd2e,43969,1732145931154
2024-11-20T23:38:52,980 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cd4c26dcc706ee34776aa8d52b048c3d, ASSIGN because future has completed
2024-11-20T23:38:52,981 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd4c26dcc706ee34776aa8d52b048c3d, server=412a5e44fd2e,43969,1732145931154}]
2024-11-20T23:38:53,141 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
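The ASSIGN subprocedure (pid=5) and its OpenRegionProcedure (pid=6) above place the table's single region on 412a5e44fd2e,43969. From the client side that placement can be inspected with a RegionLocator; the following is only an illustrative sketch (the `connection` parameter is a placeholder, not from the test):

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class RegionPlacementSketch {
  // Hypothetical: print which region server hosts each region of the table.
  static void printPlacement(Connection connection, String table) throws Exception {
    try (RegionLocator locator = connection.getRegionLocator(TableName.valueOf(table))) {
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        // For the table in this log there is exactly one region (empty start and end key).
        System.out.println(location.getRegion().getEncodedName() + " -> " + location.getServerName());
      }
    }
  }
}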
2024-11-20T23:38:53,141 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => cd4c26dcc706ee34776aa8d52b048c3d, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.', STARTKEY => '', ENDKEY => ''}
2024-11-20T23:38:53,142 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling cd4c26dcc706ee34776aa8d52b048c3d
2024-11-20T23:38:53,142 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T23:38:53,142 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for cd4c26dcc706ee34776aa8d52b048c3d
2024-11-20T23:38:53,142 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for cd4c26dcc706ee34776aa8d52b048c3d
2024-11-20T23:38:53,144 INFO [StoreOpener-cd4c26dcc706ee34776aa8d52b048c3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cd4c26dcc706ee34776aa8d52b048c3d
2024-11-20T23:38:53,146 INFO [StoreOpener-cd4c26dcc706ee34776aa8d52b048c3d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd4c26dcc706ee34776aa8d52b048c3d columnFamilyName info
2024-11-20T23:38:53,147 DEBUG [StoreOpener-cd4c26dcc706ee34776aa8d52b048c3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T23:38:53,147 INFO [StoreOpener-cd4c26dcc706ee34776aa8d52b048c3d-1 {}] regionserver.HStore(327): Store=cd4c26dcc706ee34776aa8d52b048c3d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-20T23:38:53,148 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for cd4c26dcc706ee34776aa8d52b048c3d
2024-11-20T23:38:53,149 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d
2024-11-20T23:38:53,149 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d
2024-11-20T23:38:53,150 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for cd4c26dcc706ee34776aa8d52b048c3d
2024-11-20T23:38:53,150 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for cd4c26dcc706ee34776aa8d52b048c3d
2024-11-20T23:38:53,152 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for cd4c26dcc706ee34776aa8d52b048c3d
2024-11-20T23:38:53,154 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-20T23:38:53,155 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened cd4c26dcc706ee34776aa8d52b048c3d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856325, jitterRate=0.08887439966201782}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-20T23:38:53,155 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cd4c26dcc706ee34776aa8d52b048c3d
2024-11-20T23:38:53,156 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for cd4c26dcc706ee34776aa8d52b048c3d: Running coprocessor pre-open hook at 1732145933142Writing region info on filesystem at 1732145933142Initializing all the Stores at 1732145933144 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145933144Cleaning up temporary data from old regions at 1732145933150 (+6 ms)Running coprocessor post-open hooks at 1732145933155 (+5 ms)Region opened successfully at 1732145933156 (+1 ms)
2024-11-20T23:38:53,157 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d., pid=6, masterSystemTime=1732145933135
2024-11-20T23:38:53,161 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:38:53,161 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:38:53,162 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cd4c26dcc706ee34776aa8d52b048c3d, regionState=OPEN, openSeqNum=2, regionLocation=412a5e44fd2e,43969,1732145931154
2024-11-20T23:38:53,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd4c26dcc706ee34776aa8d52b048c3d, server=412a5e44fd2e,43969,1732145931154 because future has completed
2024-11-20T23:38:53,170 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-20T23:38:53,170 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure cd4c26dcc706ee34776aa8d52b048c3d, server=412a5e44fd2e,43969,1732145931154 in 186 msec
2024-11-20T23:38:53,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-20T23:38:53,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cd4c26dcc706ee34776aa8d52b048c3d, ASSIGN in 348 msec
2024-11-20T23:38:53,174 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-20T23:38:53,174 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732145933174"}]},"ts":"1732145933174"}
2024-11-20T23:38:53,176 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta
2024-11-20T23:38:53,178 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION
2024-11-20T23:38:53,180 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 383 msec
2024-11-20T23:38:53,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:53,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:54,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:54,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:55,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:55,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:56,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:56,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:56,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T23:38:56,999 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T23:38:57,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:38:57,000 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T23:38:57,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T23:38:57,000 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T23:38:57,001 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T23:38:57,001 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-20T23:38:57,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:57,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:38:58,137 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T23:38:58,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,140 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,140 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,171 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:38:58,216 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T23:38:58,217 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-20T23:38:58,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:58,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:59,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:38:59,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:00,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:00,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:01,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:01,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:02,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:02,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-20T23:39:02,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-20T23:39:02,846 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-20T23:39:02,846 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-20T23:39:02,850 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T23:39:02,850 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:39:02,854 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d., hostname=412a5e44fd2e,43969,1732145931154, seqNum=2]
2024-11-20T23:39:02,862 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T23:39:02,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T23:39:02,867 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-20T23:39:02,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-20T23:39:02,869 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T23:39:02,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T23:39:03,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43969 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-20T23:39:03,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:39:03,033 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing cd4c26dcc706ee34776aa8d52b048c3d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-20T23:39:03,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/cde3393d5f714e849b52638a458c1988 is 1080, key is row0001/info:/1732145942855/Put/seqid=0
2024-11-20T23:39:03,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741837_1013 (size=6033)
2024-11-20T23:39:03,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741837_1013 (size=6033)
2024-11-20T23:39:03,059 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/cde3393d5f714e849b52638a458c1988
2024-11-20T23:39:03,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/cde3393d5f714e849b52638a458c1988 as hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/cde3393d5f714e849b52638a458c1988
2024-11-20T23:39:03,074 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/cde3393d5f714e849b52638a458c1988, entries=1, sequenceid=5, filesize=5.9 K
2024-11-20T23:39:03,075 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cd4c26dcc706ee34776aa8d52b048c3d in 42ms, sequenceid=5, compaction requested=false
2024-11-20T23:39:03,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for cd4c26dcc706ee34776aa8d52b048c3d:
2024-11-20T23:39:03,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
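[Editor's note] The entries above record a client-requested flush of TestLogRolling-testCompactionRecordDoesntBlockRolling: the master stores FlushTableProcedure pid=7, hands FlushRegionProcedure pid=8 to the region server, and the ~1.05 KB memstore of region cd4c26dcc706ee34776aa8d52b048c3d is written out as a ~5.9 K HFile. A minimal sketch of driving such a flush through the public Admin API is shown below; it is illustrative only (the table name is taken from the log, the rest is generic client boilerplate) and is not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Loads hbase-site.xml from the classpath; the cluster address is whatever that config points at.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Table name copied from the log entries above.
      TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      // On the master this surfaces as a FlushTableProcedure with one FlushRegionProcedure per
      // region, as in the pid=7 / pid=8 entries above; the call returns once they have finished.
      admin.flush(table);
    }
  }
}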
2024-11-20T23:39:03,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-20T23:39:03,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-20T23:39:03,084 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-20T23:39:03,084 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 210 msec 2024-11-20T23:39:03,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 222 msec 2024-11-20T23:39:03,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:03,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:04,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:04,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:05,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:05,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:06,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:06,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:07,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:07,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:08,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:08,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:09,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:09,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:10,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:10,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 after 68049ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
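[Editor's note] The attempt=2 entry above shows RecoverLeaseFSUtils still probing recoverLease()/isFileClosed() through a DFSClient that has already been shut down, which is why every call fails with "Filesystem closed". The sketch below is only a rough illustration of the underlying HDFS client calls, not the HBase utility itself; the NameNode URI, WAL path, retry cap, and sleep interval are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode URI and WAL path; substitute real values for an actual cluster.
    URI nameNode = URI.create("hdfs://localhost:8020");
    Path wal = new Path("/hbase/WALs/example-server/example-wal");
    // newInstance() returns an uncached FileSystem, so closing it here cannot invalidate a
    // client shared with other threads (the situation behind the "Filesystem closed" errors above).
    try (DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.newInstance(nameNode, conf)) {
      // Ask the NameNode to begin lease recovery; true means the file is already closed.
      boolean closed = dfs.recoverLease(wal);
      // Poll isFileClosed() until the NameNode reports the last block finalized, with a cap.
      for (int attempt = 0; !closed && attempt < 60; attempt++) {
        Thread.sleep(1000L);
        closed = dfs.isFileClosed(wal);
      }
      System.out.println("file closed: " + closed);
    }
  }
}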
2024-11-20T23:39:10,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:10,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta after 68043ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T23:39:11,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:11,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:12,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:12,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-20T23:39:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-20T23:39:12,886 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-20T23:39:12,890 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T23:39:12,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T23:39:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-20T23:39:12,894 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-20T23:39:12,896 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T23:39:12,896 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T23:39:13,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43969 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-20T23:39:13,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:39:13,050 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing cd4c26dcc706ee34776aa8d52b048c3d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-20T23:39:13,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/5350f68211cb4076ad162d889cb8815d is 1080, key is row0002/info:/1732145952887/Put/seqid=0
2024-11-20T23:39:13,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741838_1014 (size=6033)
2024-11-20T23:39:13,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741838_1014 (size=6033)
2024-11-20T23:39:13,064 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/5350f68211cb4076ad162d889cb8815d
2024-11-20T23:39:13,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/5350f68211cb4076ad162d889cb8815d as hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/5350f68211cb4076ad162d889cb8815d
2024-11-20T23:39:13,078 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/5350f68211cb4076ad162d889cb8815d, entries=1, sequenceid=9, filesize=5.9 K
2024-11-20T23:39:13,079 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cd4c26dcc706ee34776aa8d52b048c3d in 29ms, sequenceid=9, compaction requested=false
2024-11-20T23:39:13,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for cd4c26dcc706ee34776aa8d52b048c3d:
2024-11-20T23:39:13,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:39:13,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-20T23:39:13,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-20T23:39:13,083 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-20T23:39:13,083 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 185 msec
2024-11-20T23:39:13,086 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 193 msec
2024-11-20T23:39:13,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
...
11 more 2024-11-20T23:39:13,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:14,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:14,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:15,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:15,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:16,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:16,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:17,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:17,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:18,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:18,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:19,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:19,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:20,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:20,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:20,952 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T23:39:21,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:21,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:22,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:22,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-20T23:39:22,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-20T23:39:22,925 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-20T23:39:22,928 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C43969%2C1732145931154.1732145962928
2024-11-20T23:39:23,092 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 162 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42979,DS-1b120614-28af-4703-9d49-f1d1c6be64ef,DISK], DatanodeInfoWithStorage[127.0.0.1:37437,DS-d54dc545-26f8-4118-a8ad-6be637aa05b3,DISK]]
2024-11-20T23:39:23,093 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:39:23,093 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:39:23,093 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:39:23,093 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:39:23,093 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:39:23,093 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154/412a5e44fd2e%2C43969%2C1732145931154.1732145932362 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154/412a5e44fd2e%2C43969%2C1732145931154.1732145962928
2024-11-20T23:39:23,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741833_1009 (size=5546)
2024-11-20T23:39:23,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741833_1009 (size=5546)
2024-11-20T23:39:23,109 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41817:41817),(127.0.0.1/127.0.0.1:46487:46487)]
2024-11-20T23:39:23,110 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T23:39:23,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T23:39:23,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-20T23:39:23,112 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-20T23:39:23,113 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
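[Editor's note] For context on the records above: the "Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling" request is an Admin-level table flush, which the master turns into a FlushTableProcedure (pid=11) whose per-region work is delegated to FlushRegionProcedure subprocedures, while the client polls "Checking to see if procedure is done". The following is a minimal sketch of issuing the same kind of flush through the public HBase client API; it is illustrative only, not part of this test run, and the ZooKeeper quorum value is a placeholder.

// Minimal sketch (assumed setup, not taken from this run): request a table flush
// like the one logged above via the synchronous Admin API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost"); // placeholder quorum for illustration
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side flush procedure (and its region subprocedures) complete,
      // which is what the repeated "procedure is done pid=..." checks in the log poll for.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}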
2024-11-20T23:39:23,113 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T23:39:23,266 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43969 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-20T23:39:23,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:39:23,267 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing cd4c26dcc706ee34776aa8d52b048c3d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-20T23:39:23,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/011afad97ecf4d4cbbcc83d7e0246fd9 is 1080, key is row0003/info:/1732145962927/Put/seqid=0
2024-11-20T23:39:23,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741840_1016 (size=6033)
2024-11-20T23:39:23,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741840_1016 (size=6033)
2024-11-20T23:39:23,281 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/011afad97ecf4d4cbbcc83d7e0246fd9
2024-11-20T23:39:23,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/011afad97ecf4d4cbbcc83d7e0246fd9 as hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/011afad97ecf4d4cbbcc83d7e0246fd9
2024-11-20T23:39:23,294 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/011afad97ecf4d4cbbcc83d7e0246fd9, entries=1, sequenceid=13, filesize=5.9 K
2024-11-20T23:39:23,295 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cd4c26dcc706ee34776aa8d52b048c3d in 28ms, sequenceid=13, compaction requested=true
2024-11-20T23:39:23,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for cd4c26dcc706ee34776aa8d52b048c3d:
2024-11-20T23:39:23,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:39:23,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-20T23:39:23,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-20T23:39:23,300 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-20T23:39:23,300 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec
2024-11-20T23:39:23,303 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec
2024-11-20T23:39:23,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:23,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:24,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:24,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:25,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:25,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:26,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:26,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:27,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:27,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:28,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:28,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:29,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:29,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:30,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:30,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:31,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:31,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:32,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:32,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-20T23:39:33,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-20T23:39:33,165 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-20T23:39:33,165 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T23:39:33,166 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T23:39:33,167 DEBUG [Time-limited test {}] regionserver.HStore(1541): cd4c26dcc706ee34776aa8d52b048c3d/info is initiating minor compaction (all files)
2024-11-20T23:39:33,167 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-20T23:39:33,167 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T23:39:33,167 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of cd4c26dcc706ee34776aa8d52b048c3d/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:39:33,167 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/cde3393d5f714e849b52638a458c1988, hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/5350f68211cb4076ad162d889cb8815d, hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/011afad97ecf4d4cbbcc83d7e0246fd9] into tmpdir=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp, totalSize=17.7 K
2024-11-20T23:39:33,168 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting cde3393d5f714e849b52638a458c1988, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732145942855
2024-11-20T23:39:33,168 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5350f68211cb4076ad162d889cb8815d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732145952887
2024-11-20T23:39:33,169 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 011afad97ecf4d4cbbcc83d7e0246fd9, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732145962927
2024-11-20T23:39:33,185 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): cd4c26dcc706ee34776aa8d52b048c3d#info#compaction#46 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T23:39:33,186 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/0680a5a108844213a5d0be6588267dc7 is 1080, key is row0001/info:/1732145942855/Put/seqid=0
2024-11-20T23:39:33,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741841_1017 (size=8296)
2024-11-20T23:39:33,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741841_1017 (size=8296)
2024-11-20T23:39:33,203 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/0680a5a108844213a5d0be6588267dc7 as hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/0680a5a108844213a5d0be6588267dc7
2024-11-20T23:39:33,212 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in cd4c26dcc706ee34776aa8d52b048c3d/info of cd4c26dcc706ee34776aa8d52b048c3d into 0680a5a108844213a5d0be6588267dc7(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T23:39:33,212 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for cd4c26dcc706ee34776aa8d52b048c3d:
2024-11-20T23:39:33,215 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C43969%2C1732145931154.1732145973215
2024-11-20T23:39:33,225 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:39:33,225 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:39:33,226 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:39:33,226 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:39:33,226 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T23:39:33,226 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154/412a5e44fd2e%2C43969%2C1732145931154.1732145962928 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154/412a5e44fd2e%2C43969%2C1732145931154.1732145973215
2024-11-20T23:39:33,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741839_1015 (size=2520)
2024-11-20T23:39:33,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741839_1015 (size=2520)
2024-11-20T23:39:33,236 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41817:41817),(127.0.0.1/127.0.0.1:46487:46487)]
2024-11-20T23:39:33,241 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154/412a5e44fd2e%2C43969%2C1732145931154.1732145932362 to hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/oldWALs/412a5e44fd2e%2C43969%2C1732145931154.1732145932362
2024-11-20T23:39:33,241 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T23:39:33,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T23:39:33,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-20T23:39:33,244 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-20T23:39:33,245 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T23:39:33,245 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T23:39:33,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43969 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-20T23:39:33,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:39:33,398 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing cd4c26dcc706ee34776aa8d52b048c3d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-20T23:39:33,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/a8ce54a14e7b4256810c743302e13615 is 1080, key is row0000/info:/1732145973213/Put/seqid=0
2024-11-20T23:39:33,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741843_1019 (size=6033)
2024-11-20T23:39:33,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741843_1019 (size=6033)
2024-11-20T23:39:33,407 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/a8ce54a14e7b4256810c743302e13615
2024-11-20T23:39:33,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/a8ce54a14e7b4256810c743302e13615 as hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/a8ce54a14e7b4256810c743302e13615
2024-11-20T23:39:33,419 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/a8ce54a14e7b4256810c743302e13615, entries=1, sequenceid=18, filesize=5.9 K
2024-11-20T23:39:33,420 INFO [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cd4c26dcc706ee34776aa8d52b048c3d in 22ms, sequenceid=18, compaction requested=false
2024-11-20T23:39:33,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for cd4c26dcc706ee34776aa8d52b048c3d:
2024-11-20T23:39:33,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.
2024-11-20T23:39:33,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-20T23:39:33,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-20T23:39:33,425 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-20T23:39:33,425 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec
2024-11-20T23:39:33,427 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec
2024-11-20T23:39:33,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714
2024-11-20T23:39:33,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta
2024-11-20T23:39:33,632 INFO [master/412a5e44fd2e:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-20T23:39:33,632 INFO [master/412a5e44fd2e:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-20T23:39:34,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714
2024-11-20T23:39:34,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta
2024-11-20T23:39:35,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714
2024-11-20T23:39:35,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta
2024-11-20T23:39:36,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714
2024-11-20T23:39:36,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta
2024-11-20T23:39:37,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714
2024-11-20T23:39:37,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta
2024-11-20T23:39:38,142 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region cd4c26dcc706ee34776aa8d52b048c3d, had cached 0 bytes from a total of 14329
2024-11-20T23:39:38,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714
2024-11-20T23:39:38,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta
2024-11-20T23:39:39,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714
2024-11-20T23:39:39,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta
2024-11-20T23:39:40,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714
2024-11-20T23:39:40,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta
2024-11-20T23:39:41,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714
2024-11-20T23:39:41,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta
2024-11-20T23:39:42,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714
2024-11-20T23:39:42,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta
11 more 2024-11-20T23:39:43,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-20T23:39:43,275 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-20T23:39:43,278 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C43969%2C1732145931154.1732145983278 2024-11-20T23:39:43,284 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,284 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,284 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,284 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,285 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,285 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154/412a5e44fd2e%2C43969%2C1732145931154.1732145973215 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154/412a5e44fd2e%2C43969%2C1732145931154.1732145983278 2024-11-20T23:39:43,285 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46487:46487),(127.0.0.1/127.0.0.1:41817:41817)] 2024-11-20T23:39:43,285 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154/412a5e44fd2e%2C43969%2C1732145931154.1732145973215 is not closed yet, will try archiving it next time 2024-11-20T23:39:43,286 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/WALs/412a5e44fd2e,43969,1732145931154/412a5e44fd2e%2C43969%2C1732145931154.1732145962928 to hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/oldWALs/412a5e44fd2e%2C43969%2C1732145931154.1732145962928 2024-11-20T23:39:43,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T23:39:43,286 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T23:39:43,286 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:39:43,286 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:39:43,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741842_1018 (size=2026) 2024-11-20T23:39:43,286 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:39:43,286 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T23:39:43,286 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T23:39:43,286 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=415953718, stopped=false 2024-11-20T23:39:43,286 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=412a5e44fd2e,39945,1732145930972 2024-11-20T23:39:43,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741842_1018 (size=2026) 2024-11-20T23:39:43,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:39:43,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:39:43,341 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:39:43,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:43,341 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T23:39:43,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:43,341 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at 
java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:39:43,341 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:39:43,341 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '412a5e44fd2e,43969,1732145931154' ***** 2024-11-20T23:39:43,341 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T23:39:43,341 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:39:43,341 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:39:43,342 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T23:39:43,342 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T23:39:43,342 INFO [RS:0;412a5e44fd2e:43969 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T23:39:43,342 INFO [RS:0;412a5e44fd2e:43969 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-20T23:39:43,342 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(3091): Received CLOSE for cd4c26dcc706ee34776aa8d52b048c3d 2024-11-20T23:39:43,342 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(959): stopping server 412a5e44fd2e,43969,1732145931154 2024-11-20T23:39:43,342 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:39:43,342 INFO [RS:0;412a5e44fd2e:43969 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;412a5e44fd2e:43969. 2024-11-20T23:39:43,342 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cd4c26dcc706ee34776aa8d52b048c3d, disabling compactions & flushes 2024-11-20T23:39:43,342 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d. 2024-11-20T23:39:43,342 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d. 2024-11-20T23:39:43,342 DEBUG [RS:0;412a5e44fd2e:43969 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:39:43,342 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d. after waiting 0 ms 2024-11-20T23:39:43,342 DEBUG [RS:0;412a5e44fd2e:43969 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:39:43,342 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d. 2024-11-20T23:39:43,343 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-20T23:39:43,343 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T23:39:43,343 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing cd4c26dcc706ee34776aa8d52b048c3d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-20T23:39:43,343 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T23:39:43,343 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T23:39:43,345 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-20T23:39:43,345 DEBUG [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(1325): Online Regions={cd4c26dcc706ee34776aa8d52b048c3d=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d., 1588230740=hbase:meta,,1.1588230740} 2024-11-20T23:39:43,345 DEBUG [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, cd4c26dcc706ee34776aa8d52b048c3d 2024-11-20T23:39:43,345 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T23:39:43,345 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:39:43,345 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:39:43,345 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T23:39:43,345 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:39:43,345 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-20T23:39:43,347 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/d1026db8488e4cf3a238a398e4589198 is 1080, key is row0001/info:/1732145983277/Put/seqid=0 2024-11-20T23:39:43,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741845_1021 (size=6033) 2024-11-20T23:39:43,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741845_1021 (size=6033) 2024-11-20T23:39:43,355 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), 
to=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/d1026db8488e4cf3a238a398e4589198 2024-11-20T23:39:43,362 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/.tmp/info/d1026db8488e4cf3a238a398e4589198 as hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/d1026db8488e4cf3a238a398e4589198 2024-11-20T23:39:43,368 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/.tmp/info/5c139a4b868e40a5aa6c9583058013ba is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d./info:regioninfo/1732145933162/Put/seqid=0 2024-11-20T23:39:43,371 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/d1026db8488e4cf3a238a398e4589198, entries=1, sequenceid=22, filesize=5.9 K 2024-11-20T23:39:43,372 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cd4c26dcc706ee34776aa8d52b048c3d in 30ms, sequenceid=22, compaction requested=true 2024-11-20T23:39:43,373 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/cde3393d5f714e849b52638a458c1988, hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/5350f68211cb4076ad162d889cb8815d, hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/011afad97ecf4d4cbbcc83d7e0246fd9] to archive 2024-11-20T23:39:43,374 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T23:39:43,375 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/cde3393d5f714e849b52638a458c1988 to hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/cde3393d5f714e849b52638a458c1988 2024-11-20T23:39:43,376 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/5350f68211cb4076ad162d889cb8815d to hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/5350f68211cb4076ad162d889cb8815d 2024-11-20T23:39:43,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741846_1022 (size=7308) 2024-11-20T23:39:43,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741846_1022 (size=7308) 2024-11-20T23:39:43,378 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/011afad97ecf4d4cbbcc83d7e0246fd9 to hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/info/011afad97ecf4d4cbbcc83d7e0246fd9 2024-11-20T23:39:43,378 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/.tmp/info/5c139a4b868e40a5aa6c9583058013ba 2024-11-20T23:39:43,378 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=412a5e44fd2e:39945 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] 
at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-20T23:39:43,378 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [cde3393d5f714e849b52638a458c1988=6033, 5350f68211cb4076ad162d889cb8815d=6033, 011afad97ecf4d4cbbcc83d7e0246fd9=6033] 2024-11-20T23:39:43,384 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd4c26dcc706ee34776aa8d52b048c3d/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-20T23:39:43,385 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d. 2024-11-20T23:39:43,385 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cd4c26dcc706ee34776aa8d52b048c3d: Waiting for close lock at 1732145983342Running coprocessor pre-close hooks at 1732145983342Disabling compacts and flushes for region at 1732145983342Disabling writes for close at 1732145983342Obtaining lock to block concurrent updates at 1732145983343 (+1 ms)Preparing flush snapshotting stores in cd4c26dcc706ee34776aa8d52b048c3d at 1732145983343Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732145983343Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d. at 1732145983343Flushing cd4c26dcc706ee34776aa8d52b048c3d/info: creating writer at 1732145983344 (+1 ms)Flushing cd4c26dcc706ee34776aa8d52b048c3d/info: appending metadata at 1732145983347 (+3 ms)Flushing cd4c26dcc706ee34776aa8d52b048c3d/info: closing flushed file at 1732145983347Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@417f27: reopening flushed file at 1732145983361 (+14 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cd4c26dcc706ee34776aa8d52b048c3d in 30ms, sequenceid=22, compaction requested=true at 1732145983372 (+11 ms)Writing region close event to WAL at 1732145983379 (+7 ms)Running coprocessor post-close hooks at 1732145983385 (+6 ms)Closed at 1732145983385 2024-11-20T23:39:43,385 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732145932795.cd4c26dcc706ee34776aa8d52b048c3d. 
2024-11-20T23:39:43,404 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/.tmp/ns/21c1508da35e4ee69589d50e999de820 is 43, key is default/ns:d/1732145932699/Put/seqid=0 2024-11-20T23:39:43,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741847_1023 (size=5153) 2024-11-20T23:39:43,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741847_1023 (size=5153) 2024-11-20T23:39:43,409 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/.tmp/ns/21c1508da35e4ee69589d50e999de820 2024-11-20T23:39:43,433 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/.tmp/table/535874a2d4b84cb09e47517f9a58589e is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732145933174/Put/seqid=0 2024-11-20T23:39:43,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741848_1024 (size=5508) 2024-11-20T23:39:43,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741848_1024 (size=5508) 2024-11-20T23:39:43,443 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/.tmp/table/535874a2d4b84cb09e47517f9a58589e 2024-11-20T23:39:43,454 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/.tmp/info/5c139a4b868e40a5aa6c9583058013ba as hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/info/5c139a4b868e40a5aa6c9583058013ba 2024-11-20T23:39:43,471 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/info/5c139a4b868e40a5aa6c9583058013ba, entries=10, sequenceid=11, filesize=7.1 K 2024-11-20T23:39:43,473 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/.tmp/ns/21c1508da35e4ee69589d50e999de820 as hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/ns/21c1508da35e4ee69589d50e999de820 2024-11-20T23:39:43,474 WARN [Close-WAL-Writer-0 {}] 
util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:43,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:43,482 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/ns/21c1508da35e4ee69589d50e999de820, entries=2, sequenceid=11, filesize=5.0 K 2024-11-20T23:39:43,483 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/.tmp/table/535874a2d4b84cb09e47517f9a58589e as hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/table/535874a2d4b84cb09e47517f9a58589e 2024-11-20T23:39:43,499 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/table/535874a2d4b84cb09e47517f9a58589e, entries=2, sequenceid=11, filesize=5.4 K 2024-11-20T23:39:43,502 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 157ms, sequenceid=11, compaction requested=false 2024-11-20T23:39:43,525 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-20T23:39:43,526 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:39:43,526 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T23:39:43,526 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732145983345Running coprocessor pre-close hooks at 1732145983345Disabling compacts and flushes for region at 1732145983345Disabling writes for close at 1732145983345Obtaining lock to block concurrent updates at 1732145983345Preparing flush snapshotting 
stores in 1588230740 at 1732145983345Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732145983346 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732145983346Flushing 1588230740/info: creating writer at 1732145983346Flushing 1588230740/info: appending metadata at 1732145983367 (+21 ms)Flushing 1588230740/info: closing flushed file at 1732145983367Flushing 1588230740/ns: creating writer at 1732145983384 (+17 ms)Flushing 1588230740/ns: appending metadata at 1732145983403 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1732145983404 (+1 ms)Flushing 1588230740/table: creating writer at 1732145983415 (+11 ms)Flushing 1588230740/table: appending metadata at 1732145983433 (+18 ms)Flushing 1588230740/table: closing flushed file at 1732145983433Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5911f1f0: reopening flushed file at 1732145983452 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b762f64: reopening flushed file at 1732145983471 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b87e1a9: reopening flushed file at 1732145983482 (+11 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 157ms, sequenceid=11, compaction requested=false at 1732145983502 (+20 ms)Writing region close event to WAL at 1732145983516 (+14 ms)Running coprocessor post-close hooks at 1732145983525 (+9 ms)Closed at 1732145983526 (+1 ms) 2024-11-20T23:39:43,526 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T23:39:43,545 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(976): stopping server 412a5e44fd2e,43969,1732145931154; all regions closed. 
2024-11-20T23:39:43,546 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,546 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,546 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,546 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,546 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741834_1010 (size=3306) 2024-11-20T23:39:43,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741834_1010 (size=3306) 2024-11-20T23:39:43,553 DEBUG [RS:0;412a5e44fd2e:43969 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/oldWALs 2024-11-20T23:39:43,553 INFO [RS:0;412a5e44fd2e:43969 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C43969%2C1732145931154.meta:.meta(num 1732145932621) 2024-11-20T23:39:43,554 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,554 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,554 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,554 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,554 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741844_1020 (size=1252) 2024-11-20T23:39:43,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741844_1020 (size=1252) 2024-11-20T23:39:43,564 DEBUG [RS:0;412a5e44fd2e:43969 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/oldWALs 2024-11-20T23:39:43,564 INFO [RS:0;412a5e44fd2e:43969 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C43969%2C1732145931154:(num 1732145983278) 2024-11-20T23:39:43,564 DEBUG [RS:0;412a5e44fd2e:43969 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:39:43,564 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:39:43,564 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:39:43,565 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.ChoreService(370): Chore service for: regionserver/412a5e44fd2e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T23:39:43,565 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:39:43,565 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T23:39:43,565 INFO [RS:0;412a5e44fd2e:43969 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43969 2024-11-20T23:39:43,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/412a5e44fd2e,43969,1732145931154 2024-11-20T23:39:43,593 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:39:43,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:39:43,604 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [412a5e44fd2e,43969,1732145931154] 2024-11-20T23:39:43,614 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/412a5e44fd2e,43969,1732145931154 already deleted, retry=false 2024-11-20T23:39:43,614 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 412a5e44fd2e,43969,1732145931154 expired; onlineServers=0 2024-11-20T23:39:43,614 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '412a5e44fd2e,39945,1732145930972' ***** 2024-11-20T23:39:43,614 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T23:39:43,614 INFO [M:0;412a5e44fd2e:39945 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:39:43,614 INFO [M:0;412a5e44fd2e:39945 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:39:43,614 DEBUG [M:0;412a5e44fd2e:39945 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T23:39:43,614 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T23:39:43,614 DEBUG [M:0;412a5e44fd2e:39945 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T23:39:43,614 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145931735 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145931735,5,FailOnTimeoutGroup] 2024-11-20T23:39:43,614 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145931736 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145931736,5,FailOnTimeoutGroup] 2024-11-20T23:39:43,615 INFO [M:0;412a5e44fd2e:39945 {}] hbase.ChoreService(370): Chore service for: master/412a5e44fd2e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T23:39:43,615 INFO [M:0;412a5e44fd2e:39945 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:39:43,615 DEBUG [M:0;412a5e44fd2e:39945 {}] master.HMaster(1795): Stopping service threads 2024-11-20T23:39:43,615 INFO [M:0;412a5e44fd2e:39945 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T23:39:43,615 INFO [M:0;412a5e44fd2e:39945 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:39:43,615 INFO [M:0;412a5e44fd2e:39945 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T23:39:43,615 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T23:39:43,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T23:39:43,628 DEBUG [M:0;412a5e44fd2e:39945 {}] zookeeper.ZKUtil(347): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T23:39:43,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:43,628 WARN [M:0;412a5e44fd2e:39945 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T23:39:43,629 INFO [M:0;412a5e44fd2e:39945 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/.lastflushedseqids 2024-11-20T23:39:43,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741849_1025 (size=130) 2024-11-20T23:39:43,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741849_1025 (size=130) 2024-11-20T23:39:43,634 INFO [M:0;412a5e44fd2e:39945 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T23:39:43,634 INFO [M:0;412a5e44fd2e:39945 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T23:39:43,634 DEBUG [M:0;412a5e44fd2e:39945 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:39:43,634 INFO [M:0;412a5e44fd2e:39945 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:39:43,634 DEBUG [M:0;412a5e44fd2e:39945 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:39:43,634 DEBUG [M:0;412a5e44fd2e:39945 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T23:39:43,634 DEBUG [M:0;412a5e44fd2e:39945 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:39:43,635 INFO [M:0;412a5e44fd2e:39945 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-20T23:39:43,656 DEBUG [M:0;412a5e44fd2e:39945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/12d0fc610ffa4baaafd2539caf8a3d77 is 82, key is hbase:meta,,1/info:regioninfo/1732145932651/Put/seqid=0 2024-11-20T23:39:43,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741850_1026 (size=5672) 2024-11-20T23:39:43,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741850_1026 (size=5672) 2024-11-20T23:39:43,662 INFO [M:0;412a5e44fd2e:39945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/12d0fc610ffa4baaafd2539caf8a3d77 2024-11-20T23:39:43,686 DEBUG [M:0;412a5e44fd2e:39945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/af1885dbe8924eb4a7cf7050acae9d65 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732145933179/Put/seqid=0 2024-11-20T23:39:43,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741851_1027 (size=7823) 2024-11-20T23:39:43,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741851_1027 (size=7823) 2024-11-20T23:39:43,691 INFO [M:0;412a5e44fd2e:39945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/af1885dbe8924eb4a7cf7050acae9d65 2024-11-20T23:39:43,696 INFO [M:0;412a5e44fd2e:39945 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for af1885dbe8924eb4a7cf7050acae9d65 2024-11-20T23:39:43,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:39:43,704 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43969-0x1015a9d5e760001, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:39:43,708 INFO [RS:0;412a5e44fd2e:43969 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:39:43,708 INFO [RS:0;412a5e44fd2e:43969 {}] regionserver.HRegionServer(1031): Exiting; stopping=412a5e44fd2e,43969,1732145931154; zookeeper connection closed. 2024-11-20T23:39:43,709 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7ae9ed4d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7ae9ed4d 2024-11-20T23:39:43,709 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T23:39:43,716 DEBUG [M:0;412a5e44fd2e:39945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eac53ec11a0a4527ba632a86f9655c16 is 69, key is 412a5e44fd2e,43969,1732145931154/rs:state/1732145932134/Put/seqid=0 2024-11-20T23:39:43,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741852_1028 (size=5156) 2024-11-20T23:39:43,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741852_1028 (size=5156) 2024-11-20T23:39:43,722 INFO [M:0;412a5e44fd2e:39945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eac53ec11a0a4527ba632a86f9655c16 2024-11-20T23:39:43,741 DEBUG [M:0;412a5e44fd2e:39945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0ec63695b8874ad9b3d6459dc6184de7 is 52, key is load_balancer_on/state:d/1732145932791/Put/seqid=0 2024-11-20T23:39:43,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741853_1029 (size=5056) 2024-11-20T23:39:43,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741853_1029 (size=5056) 2024-11-20T23:39:43,754 INFO [M:0;412a5e44fd2e:39945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0ec63695b8874ad9b3d6459dc6184de7 2024-11-20T23:39:43,759 DEBUG [M:0;412a5e44fd2e:39945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/12d0fc610ffa4baaafd2539caf8a3d77 as hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/12d0fc610ffa4baaafd2539caf8a3d77 2024-11-20T23:39:43,764 INFO [M:0;412a5e44fd2e:39945 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/12d0fc610ffa4baaafd2539caf8a3d77, entries=8, sequenceid=121, filesize=5.5 K 2024-11-20T23:39:43,765 DEBUG [M:0;412a5e44fd2e:39945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/af1885dbe8924eb4a7cf7050acae9d65 as hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/af1885dbe8924eb4a7cf7050acae9d65 2024-11-20T23:39:43,771 INFO [M:0;412a5e44fd2e:39945 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for af1885dbe8924eb4a7cf7050acae9d65 2024-11-20T23:39:43,771 INFO [M:0;412a5e44fd2e:39945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/af1885dbe8924eb4a7cf7050acae9d65, entries=14, sequenceid=121, filesize=7.6 K 2024-11-20T23:39:43,772 DEBUG [M:0;412a5e44fd2e:39945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eac53ec11a0a4527ba632a86f9655c16 as hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/eac53ec11a0a4527ba632a86f9655c16 2024-11-20T23:39:43,778 INFO [M:0;412a5e44fd2e:39945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/eac53ec11a0a4527ba632a86f9655c16, entries=1, sequenceid=121, filesize=5.0 K 2024-11-20T23:39:43,779 DEBUG [M:0;412a5e44fd2e:39945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0ec63695b8874ad9b3d6459dc6184de7 as hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0ec63695b8874ad9b3d6459dc6184de7 2024-11-20T23:39:43,784 INFO [M:0;412a5e44fd2e:39945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33281/user/jenkins/test-data/23008bdd-435b-58e1-70b9-53a34896fcca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0ec63695b8874ad9b3d6459dc6184de7, entries=1, sequenceid=121, filesize=4.9 K 2024-11-20T23:39:43,785 INFO [M:0;412a5e44fd2e:39945 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44641, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 151ms, sequenceid=121, compaction requested=false 2024-11-20T23:39:43,787 INFO [M:0;412a5e44fd2e:39945 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
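Editor's note: the paired "Committing .tmp/... as ..." / "Added ..." entries above show the flush publishing pattern for each of the four column families: the flushed file is first written under the store's .tmp directory and only then moved into the store directory, so readers never observe a half-written file. The sketch below mirrors that write-then-rename idea with plain java.nio on a local filesystem; it is illustrative only (hypothetical class name, local Path instead of HBase's HRegionFileSystem on HDFS) and not the actual HBase code path.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Illustrative stand-in for the "flush to .tmp, then commit into the store" pattern
// visible in the log; uses a local temp directory rather than HDFS.
public class TmpThenCommit {
    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("store");          // stands in for .../store/<region>/info
        Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

        // "Flush": write the new file under .tmp first.
        Path tmpFile = tmpDir.resolve("12d0fc610ffa4baaafd2539caf8a3d77");
        Files.write(tmpFile, "flushed-cells".getBytes(StandardCharsets.UTF_8));

        // "Commit": an atomic rename publishes the finished file into the store directory.
        Path committed = storeDir.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

        System.out.println("Committed " + committed + ", size=" + Files.size(committed));
    }
}

The file name is reused from the log purely as an example; any name would do.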
2024-11-20T23:39:43,787 DEBUG [M:0;412a5e44fd2e:39945 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732145983634Disabling compacts and flushes for region at 1732145983634Disabling writes for close at 1732145983634Obtaining lock to block concurrent updates at 1732145983635 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732145983635Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44641, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1732145983635Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732145983636 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732145983636Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732145983656 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732145983656Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732145983667 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732145983685 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732145983685Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732145983696 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732145983715 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732145983716 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732145983726 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732145983741 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732145983741Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47ca46ba: reopening flushed file at 1732145983758 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d88a5c1: reopening flushed file at 1732145983764 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@13dcc177: reopening flushed file at 1732145983771 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3dcf59f0: reopening flushed file at 1732145983778 (+7 ms)Finished flush of dataSize ~43.59 KB/44641, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 151ms, sequenceid=121, compaction requested=false at 1732145983785 (+7 ms)Writing region close event to WAL at 1732145983787 (+2 ms)Closed at 1732145983787 2024-11-20T23:39:43,787 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,787 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,787 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,787 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,788 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:39:43,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37437 is added to blk_1073741830_1006 (size=53038) 2024-11-20T23:39:43,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42979 is added to blk_1073741830_1006 (size=53038) 2024-11-20T23:39:43,791 INFO [M:0;412a5e44fd2e:39945 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
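Editor's note: the "Region close journal" entry above is one long string that concatenates each close/flush step as "<step> at <epoch millis>", optionally followed by "(+N ms)". A minimal, hypothetical parser for that format is sketched below; it assumes only what is visible in the line above and is not part of HBase.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Splits a region close journal string into individual steps and prints each step's
// offset from the first timestamp. Sample steps are copied from the journal above.
public class CloseJournalSplitter {
    private static final Pattern STEP =
        Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

    public static void main(String[] args) {
        String journal = "Waiting for close lock at 1732145983634"
            + "Disabling compacts and flushes for region at 1732145983634"
            + "Disabling writes for close at 1732145983634"
            + "Writing region close event to WAL at 1732145983787 (+2 ms)"
            + "Closed at 1732145983787";

        long start = -1;
        Matcher m = STEP.matcher(journal);
        while (m.find()) {
            long ts = Long.parseLong(m.group(2));
            if (start < 0) {
                start = ts;
            }
            System.out.printf("+%3d ms  %s%n", ts - start, m.group(1).trim());
        }
    }
}

Run against the full journal string, this reproduces the per-step timing that the "(+N ms)" deltas summarize.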
2024-11-20T23:39:43,791 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T23:39:43,791 INFO [M:0;412a5e44fd2e:39945 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39945 2024-11-20T23:39:43,791 INFO [M:0;412a5e44fd2e:39945 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:39:43,904 INFO [M:0;412a5e44fd2e:39945 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:39:43,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:39:43,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39945-0x1015a9d5e760000, quorum=127.0.0.1:65104, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:39:43,907 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a3c6b7a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:39:43,907 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1beefc80{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:39:43,907 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:39:43,907 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@555a4a92{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:39:43,907 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@311facd9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/hadoop.log.dir/,STOPPED} 2024-11-20T23:39:43,909 WARN [BP-1751864290-172.17.0.2-1732145928466 heartbeating to localhost/127.0.0.1:33281 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:39:43,909 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
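Editor's note: the ZKWatcher(609) entries here and earlier ("type=NodeDeleted ... path=/hbase/master", then "type=None, state=Closed" once the session is closed) are ordinary ZooKeeper watch notifications. Below is a bare-bones client whose watcher prints events in the same shape; the connect string and znode path are assumptions for illustration, and this uses the plain ZooKeeper API, not HBase's ZKWatcher.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Minimal ZooKeeper client: the watcher logs every event (type, state, path),
// much like the ZKWatcher lines in the log above.
public class WatcherDemo {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
            System.out.println("Received ZooKeeper Event, type=" + event.getType()
                + ", state=" + event.getState() + ", path=" + event.getPath());
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, watcher);  // assumed connect string
        connected.await();
        zk.exists("/hbase/master", true);  // registers a one-shot watch; a delete would surface as NodeDeleted
        zk.close();                        // on recent ZooKeeper versions this delivers type=None, state=Closed
    }
}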
2024-11-20T23:39:43,909 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:39:43,909 WARN [BP-1751864290-172.17.0.2-1732145928466 heartbeating to localhost/127.0.0.1:33281 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1751864290-172.17.0.2-1732145928466 (Datanode Uuid 4fce3929-157c-4ad0-acc8-229f9005b9de) service to localhost/127.0.0.1:33281 2024-11-20T23:39:43,910 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/cluster_bc7d60bd-7218-eab4-2f5f-d3654a22a956/data/data3/current/BP-1751864290-172.17.0.2-1732145928466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:39:43,910 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/cluster_bc7d60bd-7218-eab4-2f5f-d3654a22a956/data/data4/current/BP-1751864290-172.17.0.2-1732145928466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:39:43,910 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:39:43,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5aa33ca4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:39:43,913 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3be31a0b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:39:43,913 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:39:43,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9b25e94{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:39:43,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@507832d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/hadoop.log.dir/,STOPPED} 2024-11-20T23:39:43,915 WARN [BP-1751864290-172.17.0.2-1732145928466 heartbeating to localhost/127.0.0.1:33281 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:39:43,916 WARN [BP-1751864290-172.17.0.2-1732145928466 heartbeating to localhost/127.0.0.1:33281 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1751864290-172.17.0.2-1732145928466 (Datanode Uuid 1d6dee23-25e5-4fec-9bc9-a82bce7e1309) service to localhost/127.0.0.1:33281 2024-11-20T23:39:43,916 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
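Editor's note: the repeated "Thread Interrupted waiting to refresh disk information: sleep interrupted" WARNs are the expected way the datanodes' periodic refreshUsed threads wind down: shutdown interrupts a thread that is sleeping between refreshes, and the thread treats the interrupt as its stop signal. A minimal stand-in for that loop (hypothetical class, not Hadoop's CachingGetSpaceUsed):

// A background refresher that sleeps between cycles and exits cleanly when interrupted,
// printing the same kind of message the datanode shutdown produces above.
public class RefreshLoop {
    public static void main(String[] args) throws InterruptedException {
        Thread refresher = new Thread(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    // pretend to recompute disk usage, then wait for the next cycle
                    Thread.sleep(1_000L);
                } catch (InterruptedException e) {
                    System.out.println("Thread Interrupted waiting to refresh disk information: "
                        + e.getMessage());
                    Thread.currentThread().interrupt();  // restore the flag and fall out of the loop
                }
            }
        }, "refreshUsed-demo");
        refresher.start();
        Thread.sleep(100L);
        refresher.interrupt();   // what shutting the datanode down effectively does to these threads
        refresher.join();
    }
}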
2024-11-20T23:39:43,916 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:39:43,916 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/cluster_bc7d60bd-7218-eab4-2f5f-d3654a22a956/data/data1/current/BP-1751864290-172.17.0.2-1732145928466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:39:43,917 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/cluster_bc7d60bd-7218-eab4-2f5f-d3654a22a956/data/data2/current/BP-1751864290-172.17.0.2-1732145928466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:39:43,918 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:39:43,927 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@188d3e33{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T23:39:43,928 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2aaa4790{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:39:43,929 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:39:43,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1af676f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:39:43,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fb33a9d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/hadoop.log.dir/,STOPPED} 2024-11-20T23:39:43,936 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T23:39:43,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T23:39:43,971 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 181) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33281 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33281 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33281 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/412a5e44fd2e:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33281 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33281 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:33281 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33281 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33281 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=485 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=325 (was 294) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=249 (was 257) 2024-11-20T23:39:43,979 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=485, MaxFileDescriptor=1048576, SystemLoadAverage=325, ProcessCount=11, AvailableMemoryMB=249 2024-11-20T23:39:43,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T23:39:43,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/hadoop.log.dir so I do NOT create it in target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d 2024-11-20T23:39:43,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b4d88374-8e21-9821-ff90-9a692500cec4/hadoop.tmp.dir so I do NOT create it in target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d 2024-11-20T23:39:43,979 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/cluster_ab314d93-baee-14a4-b97f-d9f869779a1f, deleteOnExit=true 2024-11-20T23:39:43,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T23:39:43,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/test.cache.data in system properties and HBase conf 2024-11-20T23:39:43,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T23:39:43,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/hadoop.log.dir in system properties and HBase conf 2024-11-20T23:39:43,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T23:39:43,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T23:39:43,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T23:39:43,980 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T23:39:43,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:39:43,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:39:43,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T23:39:43,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:39:43,980 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T23:39:43,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T23:39:43,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:39:43,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:39:43,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T23:39:43,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/nfs.dump.dir in system properties and HBase conf 2024-11-20T23:39:43,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/java.io.tmpdir in system properties and HBase conf 2024-11-20T23:39:43,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:39:43,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T23:39:43,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T23:39:43,995 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:39:44,225 INFO [regionserver/412a5e44fd2e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:39:44,403 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:39:44,407 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:39:44,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:39:44,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:39:44,409 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:39:44,410 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:39:44,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@517c2cfc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:39:44,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a67ff9c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:39:44,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:44,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:44,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9612b29{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/java.io.tmpdir/jetty-localhost-34969-hadoop-hdfs-3_4_1-tests_jar-_-any-6720567444557387616/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T23:39:44,519 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@277e18bc{HTTP/1.1, (http/1.1)}{localhost:34969} 2024-11-20T23:39:44,519 INFO [Time-limited test {}] server.Server(415): Started @254719ms 2024-11-20T23:39:44,532 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:39:44,833 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:39:44,839 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:39:44,840 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:39:44,840 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:39:44,841 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T23:39:44,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@417c1a7a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:39:44,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7986f193{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:39:44,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6ec16a78{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/java.io.tmpdir/jetty-localhost-42453-hadoop-hdfs-3_4_1-tests_jar-_-any-14505148996034209909/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:39:44,942 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a6db152{HTTP/1.1, (http/1.1)}{localhost:42453} 2024-11-20T23:39:44,942 INFO [Time-limited test {}] server.Server(415): Started @255142ms 2024-11-20T23:39:44,943 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:39:44,974 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:39:44,977 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:39:44,981 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:39:44,981 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:39:44,981 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:39:44,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41c54b7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:39:44,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1284b092{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:39:45,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76a5ebe7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/java.io.tmpdir/jetty-localhost-43153-hadoop-hdfs-3_4_1-tests_jar-_-any-6951547919000927410/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:39:45,087 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@268a31fc{HTTP/1.1, (http/1.1)}{localhost:43153} 2024-11-20T23:39:45,087 INFO [Time-limited test {}] server.Server(415): Started @255287ms 2024-11-20T23:39:45,088 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:39:45,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:45,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:46,293 WARN [Thread-1986 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/cluster_ab314d93-baee-14a4-b97f-d9f869779a1f/data/data1/current/BP-1136154938-172.17.0.2-1732145983999/current, will proceed with Du for space computation calculation, 2024-11-20T23:39:46,293 WARN [Thread-1987 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/cluster_ab314d93-baee-14a4-b97f-d9f869779a1f/data/data2/current/BP-1136154938-172.17.0.2-1732145983999/current, will proceed with Du for space computation calculation, 2024-11-20T23:39:46,311 WARN [Thread-1950 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T23:39:46,313 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf1c0f6511175c70b with lease ID 0x3a6db0df03bf81ff: Processing first storage report for DS-7b707ab1-55ae-428f-9de3-089bc04f20cc from datanode DatanodeRegistration(127.0.0.1:37643, datanodeUuid=bdf1ea94-15c8-474b-a5f6-fb9b49710790, infoPort=37713, infoSecurePort=0, ipcPort=33971, storageInfo=lv=-57;cid=testClusterID;nsid=1996696516;c=1732145983999) 2024-11-20T23:39:46,313 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf1c0f6511175c70b with lease ID 0x3a6db0df03bf81ff: from storage DS-7b707ab1-55ae-428f-9de3-089bc04f20cc node DatanodeRegistration(127.0.0.1:37643, datanodeUuid=bdf1ea94-15c8-474b-a5f6-fb9b49710790, infoPort=37713, infoSecurePort=0, ipcPort=33971, storageInfo=lv=-57;cid=testClusterID;nsid=1996696516;c=1732145983999), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:39:46,313 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf1c0f6511175c70b with lease ID 0x3a6db0df03bf81ff: Processing first storage report for DS-ac6e7bae-6535-4b58-96cc-28e28f934a30 from datanode DatanodeRegistration(127.0.0.1:37643, datanodeUuid=bdf1ea94-15c8-474b-a5f6-fb9b49710790, infoPort=37713, infoSecurePort=0, ipcPort=33971, storageInfo=lv=-57;cid=testClusterID;nsid=1996696516;c=1732145983999) 2024-11-20T23:39:46,314 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf1c0f6511175c70b with lease ID 0x3a6db0df03bf81ff: from storage DS-ac6e7bae-6535-4b58-96cc-28e28f934a30 node DatanodeRegistration(127.0.0.1:37643, datanodeUuid=bdf1ea94-15c8-474b-a5f6-fb9b49710790, infoPort=37713, infoSecurePort=0, ipcPort=33971, storageInfo=lv=-57;cid=testClusterID;nsid=1996696516;c=1732145983999), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:39:46,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:46,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:46,502 WARN [Thread-1997 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/cluster_ab314d93-baee-14a4-b97f-d9f869779a1f/data/data3/current/BP-1136154938-172.17.0.2-1732145983999/current, will proceed with Du for space computation calculation, 2024-11-20T23:39:46,502 WARN [Thread-1998 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/cluster_ab314d93-baee-14a4-b97f-d9f869779a1f/data/data4/current/BP-1136154938-172.17.0.2-1732145983999/current, will proceed with Du for space computation calculation, 2024-11-20T23:39:46,520 WARN [Thread-1973 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T23:39:46,522 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1bb2148bf7f90250 with lease ID 0x3a6db0df03bf8200: Processing first storage report for DS-9c885fa6-2cbd-48c3-956c-df203c508707 from datanode DatanodeRegistration(127.0.0.1:45255, datanodeUuid=f2abca98-4ce0-47cb-8d82-cfa1df5ffe95, infoPort=43117, infoSecurePort=0, ipcPort=36061, storageInfo=lv=-57;cid=testClusterID;nsid=1996696516;c=1732145983999) 2024-11-20T23:39:46,522 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1bb2148bf7f90250 with lease ID 0x3a6db0df03bf8200: from storage DS-9c885fa6-2cbd-48c3-956c-df203c508707 node DatanodeRegistration(127.0.0.1:45255, datanodeUuid=f2abca98-4ce0-47cb-8d82-cfa1df5ffe95, infoPort=43117, infoSecurePort=0, ipcPort=36061, storageInfo=lv=-57;cid=testClusterID;nsid=1996696516;c=1732145983999), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:39:46,522 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1bb2148bf7f90250 with lease ID 0x3a6db0df03bf8200: Processing first storage report for DS-553ffcb2-718e-4bb6-8d97-1f86cbb4f4fa from datanode DatanodeRegistration(127.0.0.1:45255, datanodeUuid=f2abca98-4ce0-47cb-8d82-cfa1df5ffe95, infoPort=43117, infoSecurePort=0, ipcPort=36061, storageInfo=lv=-57;cid=testClusterID;nsid=1996696516;c=1732145983999) 2024-11-20T23:39:46,522 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1bb2148bf7f90250 with lease ID 0x3a6db0df03bf8200: from storage DS-553ffcb2-718e-4bb6-8d97-1f86cbb4f4fa node DatanodeRegistration(127.0.0.1:45255, datanodeUuid=f2abca98-4ce0-47cb-8d82-cfa1df5ffe95, infoPort=43117, infoSecurePort=0, ipcPort=36061, storageInfo=lv=-57;cid=testClusterID;nsid=1996696516;c=1732145983999), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:39:46,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d 2024-11-20T23:39:46,623 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/cluster_ab314d93-baee-14a4-b97f-d9f869779a1f/zookeeper_0, clientPort=52619, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/cluster_ab314d93-baee-14a4-b97f-d9f869779a1f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/cluster_ab314d93-baee-14a4-b97f-d9f869779a1f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T23:39:46,624 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52619 2024-11-20T23:39:46,624 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:39:46,626 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:39:46,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:39:46,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:39:46,636 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e with version=8 2024-11-20T23:39:46,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/hbase-staging 2024-11-20T23:39:46,638 INFO [Time-limited test {}] client.ConnectionUtils(128): master/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:39:46,638 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:39:46,638 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:39:46,638 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:39:46,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:39:46,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:39:46,639 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T23:39:46,639 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:39:46,639 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43531 2024-11-20T23:39:46,641 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43531 connecting to ZooKeeper ensemble=127.0.0.1:52619 2024-11-20T23:39:46,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:435310x0, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:39:46,701 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43531-0x1015a9e37e90000 connected 2024-11-20T23:39:46,785 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:39:46,787 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:39:46,790 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:39:46,791 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e, hbase.cluster.distributed=false 2024-11-20T23:39:46,794 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:39:46,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43531 2024-11-20T23:39:46,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43531 2024-11-20T23:39:46,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43531 2024-11-20T23:39:46,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43531 2024-11-20T23:39:46,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43531 2024-11-20T23:39:46,811 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:39:46,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:39:46,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:39:46,811 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:39:46,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:39:46,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:39:46,811 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T23:39:46,812 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:39:46,812 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37437 2024-11-20T23:39:46,814 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37437 connecting to ZooKeeper ensemble=127.0.0.1:52619 2024-11-20T23:39:46,814 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:39:46,815 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:39:46,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:374370x0, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:39:46,828 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:39:46,828 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37437-0x1015a9e37e90001 connected 2024-11-20T23:39:46,828 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T23:39:46,829 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T23:39:46,829 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T23:39:46,831 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:39:46,832 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37437 2024-11-20T23:39:46,832 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37437 2024-11-20T23:39:46,832 DEBUG [Time-limited 
test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37437 2024-11-20T23:39:46,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37437 2024-11-20T23:39:46,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37437 2024-11-20T23:39:46,847 DEBUG [M:0;412a5e44fd2e:43531 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;412a5e44fd2e:43531 2024-11-20T23:39:46,848 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/412a5e44fd2e,43531,1732145986638 2024-11-20T23:39:46,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:39:46,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:39:46,859 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/412a5e44fd2e,43531,1732145986638 2024-11-20T23:39:46,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T23:39:46,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:46,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:46,870 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T23:39:46,871 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/412a5e44fd2e,43531,1732145986638 from backup master directory 2024-11-20T23:39:46,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/412a5e44fd2e,43531,1732145986638 2024-11-20T23:39:46,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:39:46,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:39:46,880 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T23:39:46,880 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=412a5e44fd2e,43531,1732145986638 2024-11-20T23:39:46,887 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/hbase.id] with ID: 7c45d933-4d12-4dd4-8e55-12c032fdc37c 2024-11-20T23:39:46,888 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/.tmp/hbase.id 2024-11-20T23:39:46,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:39:46,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:39:46,896 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/.tmp/hbase.id]:[hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/hbase.id] 2024-11-20T23:39:46,911 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:39:46,911 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T23:39:46,913 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-20T23:39:46,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:46,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:46,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:39:46,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:39:46,928 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T23:39:46,928 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T23:39:46,929 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:39:46,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:39:46,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:39:46,935 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store 2024-11-20T23:39:46,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:39:46,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:39:46,942 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:39:46,942 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:39:46,942 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:39:46,942 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:39:46,942 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T23:39:46,942 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:39:46,942 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T23:39:46,942 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732145986942Disabling compacts and flushes for region at 1732145986942Disabling writes for close at 1732145986942Writing region close event to WAL at 1732145986942Closed at 1732145986942 2024-11-20T23:39:46,943 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/.initializing 2024-11-20T23:39:46,943 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/WALs/412a5e44fd2e,43531,1732145986638 2024-11-20T23:39:46,945 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C43531%2C1732145986638, suffix=, logDir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/WALs/412a5e44fd2e,43531,1732145986638, archiveDir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/oldWALs, maxLogs=10 2024-11-20T23:39:46,946 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C43531%2C1732145986638.1732145986946 2024-11-20T23:39:46,950 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/WALs/412a5e44fd2e,43531,1732145986638/412a5e44fd2e%2C43531%2C1732145986638.1732145986946 2024-11-20T23:39:46,951 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:37713:37713)] 2024-11-20T23:39:46,951 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:39:46,951 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:39:46,951 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:39:46,951 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:39:46,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:39:46,955 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T23:39:46,956 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:46,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:39:46,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:39:46,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T23:39:46,957 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:46,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:39:46,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:39:46,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T23:39:46,958 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:46,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:39:46,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:39:46,960 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T23:39:46,960 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:46,960 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:39:46,960 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:39:46,961 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:39:46,961 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:39:46,962 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:39:46,962 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:39:46,963 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T23:39:46,964 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:39:46,968 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:39:46,969 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741563, jitterRate=-0.05705462396144867}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T23:39:46,969 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732145986951Initializing all the Stores at 1732145986952 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145986952Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145986954 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145986954Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145986954Cleaning up temporary data from old regions at 1732145986962 (+8 ms)Region opened successfully at 1732145986969 (+7 ms) 2024-11-20T23:39:46,970 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T23:39:46,973 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7954c85b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:39:46,974 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T23:39:46,974 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T23:39:46,974 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T23:39:46,974 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T23:39:46,975 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T23:39:46,975 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T23:39:46,975 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T23:39:46,978 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T23:39:46,978 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T23:39:46,985 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T23:39:46,985 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T23:39:46,986 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T23:39:46,995 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T23:39:46,996 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T23:39:46,997 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T23:39:46,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:39:46,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T23:39:46,999 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T23:39:47,006 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T23:39:47,007 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T23:39:47,017 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T23:39:47,021 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T23:39:47,027 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T23:39:47,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:39:47,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:39:47,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:47,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:47,039 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=412a5e44fd2e,43531,1732145986638, sessionid=0x1015a9e37e90000, setting cluster-up flag (Was=false) 2024-11-20T23:39:47,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:47,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:47,091 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T23:39:47,093 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,43531,1732145986638 2024-11-20T23:39:47,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:47,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:47,143 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T23:39:47,147 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,43531,1732145986638 2024-11-20T23:39:47,151 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T23:39:47,155 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T23:39:47,155 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T23:39:47,155 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-20T23:39:47,156 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 412a5e44fd2e,43531,1732145986638 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T23:39:47,158 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:39:47,158 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:39:47,158 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:39:47,158 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:39:47,158 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/412a5e44fd2e:0, corePoolSize=10, maxPoolSize=10 2024-11-20T23:39:47,158 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:39:47,158 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:39:47,158 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:39:47,159 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732146017159 2024-11-20T23:39:47,159 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T23:39:47,159 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T23:39:47,159 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T23:39:47,159 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T23:39:47,159 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T23:39:47,159 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T23:39:47,160 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,160 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T23:39:47,160 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:39:47,160 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T23:39:47,160 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T23:39:47,160 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T23:39:47,160 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T23:39:47,160 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T23:39:47,161 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145987160,5,FailOnTimeoutGroup] 2024-11-20T23:39:47,161 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145987161,5,FailOnTimeoutGroup] 2024-11-20T23:39:47,161 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,161 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T23:39:47,161 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,161 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-20T23:39:47,161 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:47,161 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T23:39:47,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:39:47,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:39:47,169 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T23:39:47,169 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e 2024-11-20T23:39:47,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:39:47,176 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:39:47,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:39:47,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:39:47,179 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:39:47,179 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:47,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:39:47,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:39:47,181 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:39:47,181 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:47,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:39:47,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:39:47,182 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:39:47,182 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:47,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:39:47,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:39:47,184 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:39:47,184 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:47,184 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:39:47,185 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:39:47,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740 2024-11-20T23:39:47,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740 2024-11-20T23:39:47,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:39:47,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:39:47,188 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T23:39:47,189 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:39:47,191 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:39:47,192 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=746859, jitterRate=-0.05031988024711609}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:39:47,192 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732145987176Initializing all the Stores at 1732145987177 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145987177Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145987177Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145987177Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145987177Cleaning up temporary data from old regions at 1732145987188 (+11 ms)Region opened successfully at 1732145987192 (+4 ms) 2024-11-20T23:39:47,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T23:39:47,193 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:39:47,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:39:47,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T23:39:47,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:39:47,193 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T23:39:47,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732145987193Disabling compacts and flushes for region at 1732145987193Disabling writes for close at 1732145987193Writing region close event to WAL at 1732145987193Closed at 1732145987193 2024-11-20T23:39:47,195 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:39:47,195 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T23:39:47,195 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T23:39:47,196 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:39:47,197 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T23:39:47,236 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(746): ClusterId : 7c45d933-4d12-4dd4-8e55-12c032fdc37c 2024-11-20T23:39:47,236 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T23:39:47,250 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T23:39:47,250 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T23:39:47,261 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T23:39:47,262 DEBUG [RS:0;412a5e44fd2e:37437 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54999355, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:39:47,279 DEBUG [RS:0;412a5e44fd2e:37437 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;412a5e44fd2e:37437 2024-11-20T23:39:47,279 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T23:39:47,279 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T23:39:47,279 DEBUG [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-20T23:39:47,280 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(2659): reportForDuty to master=412a5e44fd2e,43531,1732145986638 with port=37437, startcode=1732145986811 2024-11-20T23:39:47,280 DEBUG [RS:0;412a5e44fd2e:37437 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T23:39:47,281 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53759, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T23:39:47,282 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43531 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 412a5e44fd2e,37437,1732145986811 2024-11-20T23:39:47,282 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43531 {}] master.ServerManager(517): Registering regionserver=412a5e44fd2e,37437,1732145986811 2024-11-20T23:39:47,283 DEBUG [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e 2024-11-20T23:39:47,283 DEBUG [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46237 2024-11-20T23:39:47,283 DEBUG [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T23:39:47,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:39:47,291 DEBUG [RS:0;412a5e44fd2e:37437 {}] zookeeper.ZKUtil(111): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/412a5e44fd2e,37437,1732145986811 2024-11-20T23:39:47,291 WARN [RS:0;412a5e44fd2e:37437 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-20T23:39:47,291 INFO [RS:0;412a5e44fd2e:37437 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:39:47,291 DEBUG [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/WALs/412a5e44fd2e,37437,1732145986811 2024-11-20T23:39:47,291 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [412a5e44fd2e,37437,1732145986811] 2024-11-20T23:39:47,293 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T23:39:47,295 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T23:39:47,295 INFO [RS:0;412a5e44fd2e:37437 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T23:39:47,295 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,295 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T23:39:47,296 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T23:39:47,296 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,296 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:39:47,296 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:39:47,296 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:39:47,297 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:39:47,297 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:39:47,297 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:39:47,297 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:39:47,297 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:39:47,297 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/412a5e44fd2e:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T23:39:47,297 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:39:47,297 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:39:47,297 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:39:47,297 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:39:47,297 DEBUG [RS:0;412a5e44fd2e:37437 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:39:47,298 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,298 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,298 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,298 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,298 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,298 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,37437,1732145986811-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:39:47,313 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T23:39:47,313 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,37437,1732145986811-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,313 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,313 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.Replication(171): 412a5e44fd2e,37437,1732145986811 started 2024-11-20T23:39:47,326 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T23:39:47,326 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(1482): Serving as 412a5e44fd2e,37437,1732145986811, RpcServer on 412a5e44fd2e/172.17.0.2:37437, sessionid=0x1015a9e37e90001 2024-11-20T23:39:47,326 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T23:39:47,326 DEBUG [RS:0;412a5e44fd2e:37437 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 412a5e44fd2e,37437,1732145986811 2024-11-20T23:39:47,326 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,37437,1732145986811' 2024-11-20T23:39:47,326 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T23:39:47,327 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T23:39:47,327 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T23:39:47,327 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T23:39:47,328 DEBUG [RS:0;412a5e44fd2e:37437 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 412a5e44fd2e,37437,1732145986811 2024-11-20T23:39:47,328 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,37437,1732145986811' 2024-11-20T23:39:47,328 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T23:39:47,328 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T23:39:47,328 DEBUG [RS:0;412a5e44fd2e:37437 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T23:39:47,328 INFO [RS:0;412a5e44fd2e:37437 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T23:39:47,328 INFO [RS:0;412a5e44fd2e:37437 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T23:39:47,347 WARN [412a5e44fd2e:43531 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-20T23:39:47,433 INFO [RS:0;412a5e44fd2e:37437 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C37437%2C1732145986811, suffix=, logDir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/WALs/412a5e44fd2e,37437,1732145986811, archiveDir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/oldWALs, maxLogs=32 2024-11-20T23:39:47,434 INFO [RS:0;412a5e44fd2e:37437 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C37437%2C1732145986811.1732145987434 2024-11-20T23:39:47,446 INFO [RS:0;412a5e44fd2e:37437 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/WALs/412a5e44fd2e,37437,1732145986811/412a5e44fd2e%2C37437%2C1732145986811.1732145987434 2024-11-20T23:39:47,447 DEBUG [RS:0;412a5e44fd2e:37437 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37713:37713),(127.0.0.1/127.0.0.1:43117:43117)] 2024-11-20T23:39:47,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:47,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:47,598 DEBUG [412a5e44fd2e:43531 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T23:39:47,599 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=412a5e44fd2e,37437,1732145986811 2024-11-20T23:39:47,603 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,37437,1732145986811, state=OPENING 2024-11-20T23:39:47,616 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T23:39:47,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:47,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:39:47,628 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:39:47,628 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:39:47,628 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:39:47,628 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,37437,1732145986811}] 2024-11-20T23:39:47,783 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T23:39:47,786 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49975, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T23:39:47,790 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T23:39:47,790 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:39:47,792 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C37437%2C1732145986811.meta, suffix=.meta, logDir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/WALs/412a5e44fd2e,37437,1732145986811, archiveDir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/oldWALs, maxLogs=32 2024-11-20T23:39:47,793 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C37437%2C1732145986811.meta.1732145987792.meta 2024-11-20T23:39:47,798 INFO 
[RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/WALs/412a5e44fd2e,37437,1732145986811/412a5e44fd2e%2C37437%2C1732145986811.meta.1732145987792.meta 2024-11-20T23:39:47,799 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37713:37713),(127.0.0.1/127.0.0.1:43117:43117)] 2024-11-20T23:39:47,799 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:39:47,800 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T23:39:47,800 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T23:39:47,800 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T23:39:47,800 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T23:39:47,800 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:39:47,800 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T23:39:47,800 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T23:39:47,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:39:47,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:39:47,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:47,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:39:47,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:39:47,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:39:47,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:47,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:39:47,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:39:47,805 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:39:47,805 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:47,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:39:47,806 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:39:47,807 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:39:47,807 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:47,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:39:47,807 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:39:47,808 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740 2024-11-20T23:39:47,808 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740 2024-11-20T23:39:47,810 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:39:47,810 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:39:47,810 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T23:39:47,811 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:39:47,812 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764990, jitterRate=-0.027265161275863647}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:39:47,812 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T23:39:47,813 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732145987800Writing region info on filesystem at 1732145987800Initializing all the Stores at 1732145987801 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145987801Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145987801Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145987801Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732145987801Cleaning up temporary data from old regions at 1732145987810 (+9 ms)Running coprocessor post-open hooks at 1732145987812 (+2 ms)Region opened successfully at 1732145987813 (+1 ms) 2024-11-20T23:39:47,814 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732145987782 2024-11-20T23:39:47,816 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T23:39:47,816 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T23:39:47,817 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=412a5e44fd2e,37437,1732145986811 2024-11-20T23:39:47,818 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,37437,1732145986811, state=OPEN 2024-11-20T23:39:47,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:39:47,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:39:47,860 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:39:47,860 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=412a5e44fd2e,37437,1732145986811 2024-11-20T23:39:47,860 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:39:47,868 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T23:39:47,868 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,37437,1732145986811 in 233 msec 2024-11-20T23:39:47,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T23:39:47,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 673 msec 2024-11-20T23:39:47,872 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:39:47,872 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T23:39:47,874 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:39:47,874 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,37437,1732145986811, seqNum=-1] 2024-11-20T23:39:47,874 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:39:47,875 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33479, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:39:47,881 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 728 msec 2024-11-20T23:39:47,881 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732145987881, completionTime=-1 2024-11-20T23:39:47,881 INFO 
[master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T23:39:47,881 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T23:39:47,883 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T23:39:47,884 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732146047883 2024-11-20T23:39:47,884 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732146107884 2024-11-20T23:39:47,884 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-20T23:39:47,884 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,43531,1732145986638-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,884 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,43531,1732145986638-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,884 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,43531,1732145986638-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,884 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-412a5e44fd2e:43531, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,884 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,884 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,886 DEBUG [master/412a5e44fd2e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T23:39:47,889 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.009sec 2024-11-20T23:39:47,889 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T23:39:47,889 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T23:39:47,889 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T23:39:47,889 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-20T23:39:47,889 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T23:39:47,889 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,43531,1732145986638-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:39:47,889 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,43531,1732145986638-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T23:39:47,892 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T23:39:47,892 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T23:39:47,892 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,43531,1732145986638-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:39:47,937 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fabccb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:39:47,937 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 412a5e44fd2e,43531,-1 for getting cluster id 2024-11-20T23:39:47,937 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T23:39:47,940 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c45d933-4d12-4dd4-8e55-12c032fdc37c' 2024-11-20T23:39:47,941 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T23:39:47,941 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c45d933-4d12-4dd4-8e55-12c032fdc37c" 2024-11-20T23:39:47,941 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e5c5329, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:39:47,941 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [412a5e44fd2e,43531,-1] 2024-11-20T23:39:47,942 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T23:39:47,942 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:39:47,944 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51712, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T23:39:47,945 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74c5d0dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:39:47,945 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:39:47,947 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,37437,1732145986811, seqNum=-1] 2024-11-20T23:39:47,947 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:39:47,948 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34318, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:39:47,950 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=412a5e44fd2e,43531,1732145986638 2024-11-20T23:39:47,950 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:39:47,952 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T23:39:47,952 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T23:39:47,953 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 412a5e44fd2e,43531,1732145986638 2024-11-20T23:39:47,953 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@ba888ab 2024-11-20T23:39:47,953 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T23:39:47,954 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51718, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T23:39:47,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43531 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T23:39:47,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43531 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-20T23:39:47,955 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43531 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T23:39:47,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43531 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-20T23:39:47,957 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T23:39:47,957 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:47,957 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43531 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-20T23:39:47,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T23:39:47,958 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T23:39:47,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741835_1011 (size=381) 2024-11-20T23:39:47,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741835_1011 (size=381) 2024-11-20T23:39:47,966 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 11bd138fc4c82dfa760e3563dd81a50e, NAME => 'TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e 2024-11-20T23:39:47,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741836_1012 (size=64) 2024-11-20T23:39:47,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741836_1012 (size=64) 2024-11-20T23:39:47,972 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:39:47,972 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 11bd138fc4c82dfa760e3563dd81a50e, disabling compactions & flushes 2024-11-20T23:39:47,972 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 2024-11-20T23:39:47,972 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 2024-11-20T23:39:47,972 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. after waiting 0 ms 2024-11-20T23:39:47,972 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 2024-11-20T23:39:47,972 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 2024-11-20T23:39:47,972 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 11bd138fc4c82dfa760e3563dd81a50e: Waiting for close lock at 1732145987972Disabling compacts and flushes for region at 1732145987972Disabling writes for close at 1732145987972Writing region close event to WAL at 1732145987972Closed at 1732145987972 2024-11-20T23:39:47,974 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T23:39:47,974 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732145987974"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732145987974"}]},"ts":"1732145987974"} 2024-11-20T23:39:47,976 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-20T23:39:47,977 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T23:39:47,977 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732145987977"}]},"ts":"1732145987977"} 2024-11-20T23:39:47,979 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-20T23:39:47,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=11bd138fc4c82dfa760e3563dd81a50e, ASSIGN}] 2024-11-20T23:39:47,981 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=11bd138fc4c82dfa760e3563dd81a50e, ASSIGN 2024-11-20T23:39:47,981 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=11bd138fc4c82dfa760e3563dd81a50e, ASSIGN; state=OFFLINE, location=412a5e44fd2e,37437,1732145986811; forceNewPlan=false, retain=false 2024-11-20T23:39:48,132 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=11bd138fc4c82dfa760e3563dd81a50e, regionState=OPENING, regionLocation=412a5e44fd2e,37437,1732145986811 2024-11-20T23:39:48,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=11bd138fc4c82dfa760e3563dd81a50e, ASSIGN because future has completed 2024-11-20T23:39:48,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 11bd138fc4c82dfa760e3563dd81a50e, server=412a5e44fd2e,37437,1732145986811}] 2024-11-20T23:39:48,294 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 
2024-11-20T23:39:48,294 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 11bd138fc4c82dfa760e3563dd81a50e, NAME => 'TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:39:48,294 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:39:48,294 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:39:48,295 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:39:48,295 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:39:48,296 INFO [StoreOpener-11bd138fc4c82dfa760e3563dd81a50e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:39:48,297 INFO [StoreOpener-11bd138fc4c82dfa760e3563dd81a50e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 11bd138fc4c82dfa760e3563dd81a50e columnFamilyName info 2024-11-20T23:39:48,297 DEBUG [StoreOpener-11bd138fc4c82dfa760e3563dd81a50e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:39:48,297 INFO [StoreOpener-11bd138fc4c82dfa760e3563dd81a50e-1 {}] regionserver.HStore(327): Store=11bd138fc4c82dfa760e3563dd81a50e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:39:48,297 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:39:48,298 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:39:48,298 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:39:48,299 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:39:48,299 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:39:48,300 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:39:48,303 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:39:48,303 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 11bd138fc4c82dfa760e3563dd81a50e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=784317, jitterRate=-0.002690434455871582}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T23:39:48,303 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:39:48,304 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 11bd138fc4c82dfa760e3563dd81a50e: Running coprocessor pre-open hook at 1732145988295Writing region info on filesystem at 1732145988295Initializing all the Stores at 1732145988295Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732145988295Cleaning up temporary data from old regions at 1732145988299 (+4 ms)Running coprocessor post-open hooks at 1732145988303 (+4 ms)Region opened successfully at 1732145988304 (+1 ms) 2024-11-20T23:39:48,305 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e., pid=6, masterSystemTime=1732145988291 2024-11-20T23:39:48,307 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 
2024-11-20T23:39:48,308 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 2024-11-20T23:39:48,308 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=11bd138fc4c82dfa760e3563dd81a50e, regionState=OPEN, openSeqNum=2, regionLocation=412a5e44fd2e,37437,1732145986811 2024-11-20T23:39:48,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 11bd138fc4c82dfa760e3563dd81a50e, server=412a5e44fd2e,37437,1732145986811 because future has completed 2024-11-20T23:39:48,314 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T23:39:48,314 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 11bd138fc4c82dfa760e3563dd81a50e, server=412a5e44fd2e,37437,1732145986811 in 175 msec 2024-11-20T23:39:48,316 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T23:39:48,316 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=11bd138fc4c82dfa760e3563dd81a50e, ASSIGN in 335 msec 2024-11-20T23:39:48,317 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T23:39:48,318 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732145988317"}]},"ts":"1732145988317"} 2024-11-20T23:39:48,320 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-20T23:39:48,321 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T23:39:48,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 366 msec 2024-11-20T23:39:48,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,387 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:48,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:48,911 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T23:39:48,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,933 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,933 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,933 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,934 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,934 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,934 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:48,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:49,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:49,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:50,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:50,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:51,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:51,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:52,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:52,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:53,294 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T23:39:53,294 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-20T23:39:53,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:53,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:53,798 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T23:39:53,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:53,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:39:54,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:54,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:55,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:55,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:39:56,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:56,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:56,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T23:39:56,999 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T23:39:56,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:39:56,999 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T23:39:56,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-20T23:39:56,999 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-20T23:39:57,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:57,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:58,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T23:39:58,025 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-20T23:39:58,025 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-20T23:39:58,028 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-20T23:39:58,028 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 2024-11-20T23:39:58,030 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e., hostname=412a5e44fd2e,37437,1732145986811, seqNum=2] 2024-11-20T23:39:58,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:39:58,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 11bd138fc4c82dfa760e3563dd81a50e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T23:39:58,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/56c3444d613149baa1b0f6a3426b2f49 is 1080, key is row0001/info:/1732145998031/Put/seqid=0 2024-11-20T23:39:58,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741837_1013 (size=12509) 2024-11-20T23:39:58,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741837_1013 (size=12509) 2024-11-20T23:39:58,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/56c3444d613149baa1b0f6a3426b2f49 2024-11-20T23:39:58,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/56c3444d613149baa1b0f6a3426b2f49 as 
hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/56c3444d613149baa1b0f6a3426b2f49 2024-11-20T23:39:58,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/56c3444d613149baa1b0f6a3426b2f49, entries=7, sequenceid=11, filesize=12.2 K 2024-11-20T23:39:58,089 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for 11bd138fc4c82dfa760e3563dd81a50e in 44ms, sequenceid=11, compaction requested=false 2024-11-20T23:39:58,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 11bd138fc4c82dfa760e3563dd81a50e: 2024-11-20T23:39:58,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:39:58,089 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 11bd138fc4c82dfa760e3563dd81a50e 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-11-20T23:39:58,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/db316cd3b4f54280833821eeac9a3fab is 1080, key is row0008/info:/1732145998045/Put/seqid=0 2024-11-20T23:39:58,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741838_1014 (size=26530) 2024-11-20T23:39:58,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741838_1014 (size=26530) 2024-11-20T23:39:58,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/db316cd3b4f54280833821eeac9a3fab 2024-11-20T23:39:58,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/db316cd3b4f54280833821eeac9a3fab as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/db316cd3b4f54280833821eeac9a3fab 2024-11-20T23:39:58,112 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/db316cd3b4f54280833821eeac9a3fab, entries=20, sequenceid=34, filesize=25.9 K 2024-11-20T23:39:58,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=5.25 KB/5380 for 11bd138fc4c82dfa760e3563dd81a50e in 24ms, sequenceid=34, compaction requested=false 2024-11-20T23:39:58,113 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 11bd138fc4c82dfa760e3563dd81a50e: 2024-11-20T23:39:58,113 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=38.1 K, sizeToCheck=16.0 K 2024-11-20T23:39:58,113 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:39:58,113 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/db316cd3b4f54280833821eeac9a3fab because midkey is the same as first or last row 2024-11-20T23:39:58,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:58,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:59,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:39:59,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:40:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:40:00,106 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 11bd138fc4c82dfa760e3563dd81a50e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T23:40:00,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/f1ddc137f6f14e179cbf70577c740cbc is 1080, key is row0028/info:/1732145998090/Put/seqid=0 2024-11-20T23:40:00,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741839_1015 (size=12509) 2024-11-20T23:40:00,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741839_1015 (size=12509) 2024-11-20T23:40:00,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/f1ddc137f6f14e179cbf70577c740cbc 2024-11-20T23:40:00,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/f1ddc137f6f14e179cbf70577c740cbc as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/f1ddc137f6f14e179cbf70577c740cbc 2024-11-20T23:40:00,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/f1ddc137f6f14e179cbf70577c740cbc, entries=7, sequenceid=44, filesize=12.2 K 2024-11-20T23:40:00,149 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 11bd138fc4c82dfa760e3563dd81a50e in 43ms, sequenceid=44, compaction requested=true 2024-11-20T23:40:00,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 11bd138fc4c82dfa760e3563dd81a50e: 2024-11-20T23:40:00,149 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=50.3 K, sizeToCheck=16.0 K 2024-11-20T23:40:00,149 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:40:00,150 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/db316cd3b4f54280833821eeac9a3fab because midkey is the same as first or last row 2024-11-20T23:40:00,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 11bd138fc4c82dfa760e3563dd81a50e:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-20T23:40:00,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:00,150 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:40:00,151 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 51548 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T23:40:00,151 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1541): 11bd138fc4c82dfa760e3563dd81a50e/info is initiating minor compaction (all files) 2024-11-20T23:40:00,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:40:00,151 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 11bd138fc4c82dfa760e3563dd81a50e/info in TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 2024-11-20T23:40:00,151 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/56c3444d613149baa1b0f6a3426b2f49, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/db316cd3b4f54280833821eeac9a3fab, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/f1ddc137f6f14e179cbf70577c740cbc] into tmpdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp, totalSize=50.3 K 2024-11-20T23:40:00,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 11bd138fc4c82dfa760e3563dd81a50e 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-20T23:40:00,152 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 56c3444d613149baa1b0f6a3426b2f49, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732145998031 2024-11-20T23:40:00,152 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting db316cd3b4f54280833821eeac9a3fab, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732145998045 2024-11-20T23:40:00,153 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting f1ddc137f6f14e179cbf70577c740cbc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732145998090 2024-11-20T23:40:00,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/a5a507cff99243b99ba540126f5931cd is 1080, key is row0035/info:/1732146000107/Put/seqid=0 
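
Note: the compaction-selection entries above pick three store files whose sizes were reported earlier as 12509, 26530 and 12509 bytes (12.2 K + 25.9 K + 12.2 K), which matches the "selected 3 files of size 51548" total in the ExploringCompactionPolicy line. As a rough, self-contained illustration of the size-ratio idea behind such size-based selection, the Java sketch below applies a ratio check to those three sizes. The class name, helper method and the 1.2 ratio are assumptions made for this sketch only; this is not HBase's actual ExploringCompactionPolicy code.

import java.util.List;

/**
 * Illustrative sketch only: a simplified size-ratio test of the kind used by
 * size-based compaction policies. The 1.2 ratio and all names here are
 * assumptions, not HBase's real ExploringCompactionPolicy API.
 */
public class RatioSelectionSketch {

    // A candidate set passes when no file is larger than
    // ratio * (sum of the sizes of the other files in the set).
    static boolean withinRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // The three store-file sizes reported in the log above:
        // 56c3444d... (12509), db316cd3... (26530), f1ddc137... (12509).
        List<Long> storeFiles = List.of(12509L, 26530L, 12509L);
        long total = storeFiles.stream().mapToLong(Long::longValue).sum();
        System.out.println("candidate total = " + total + " bytes");   // 51548, matching the log
        System.out.println("within ratio 1.2? " + withinRatio(storeFiles, 1.2));
    }
}

With these sizes every file stays within 1.2x the combined size of the other two, which is consistent with the log reporting a single permutation that was already "in ratio" and with all three files going into the minor compaction that completes below.
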
2024-11-20T23:40:00,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741840_1016 (size=18987) 2024-11-20T23:40:00,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741840_1016 (size=18987) 2024-11-20T23:40:00,171 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 11bd138fc4c82dfa760e3563dd81a50e#info#compaction#60 average throughput is 17.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:40:00,171 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/a4c43e7e44d74ce68c0004ca37127c5a is 1080, key is row0001/info:/1732145998031/Put/seqid=0 2024-11-20T23:40:00,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741841_1017 (size=41747) 2024-11-20T23:40:00,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741841_1017 (size=41747) 2024-11-20T23:40:00,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=11bd138fc4c82dfa760e3563dd81a50e, server=412a5e44fd2e,37437,1732145986811 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-20T23:40:00,192 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/a4c43e7e44d74ce68c0004ca37127c5a as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a4c43e7e44d74ce68c0004ca37127c5a 2024-11-20T23:40:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34318 deadline: 1732146010189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=11bd138fc4c82dfa760e3563dd81a50e, server=412a5e44fd2e,37437,1732145986811 2024-11-20T23:40:00,199 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 11bd138fc4c82dfa760e3563dd81a50e/info of 11bd138fc4c82dfa760e3563dd81a50e into a4c43e7e44d74ce68c0004ca37127c5a(size=40.8 K), total size for store is 40.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
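The WARN and the callId 69 rejection above show HRegion.checkResources refusing a put with RegionTooBusyException once the region's memstore passes its blocking limit (32.0 K in this test configuration). The stock HBase client already retries this case internally with backoff, so the following Java snippet is only a hand-rolled, illustrative version of that handling, not the client's actual retry path; the table name is taken from the log, while the row, column, and retry settings are made-up placeholders.

// Hedged sketch: back off and retry a put that was rejected because the region's
// memstore was over its blocking limit (RegionTooBusyException).
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
            // Placeholder row/column values, not taken from the test run.
            Put put = new Put(Bytes.toBytes("row-placeholder"))
                .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (IOException e) {
                    boolean regionBusy = e instanceof RegionTooBusyException
                        || e.getCause() instanceof RegionTooBusyException;
                    if (!regionBusy || attempt >= 5) {
                        throw e; // unrelated failure, or out of attempts
                    }
                    Thread.sleep(backoffMs); // let flushes/compactions drain the region
                    backoffMs *= 2;          // simple exponential backoff
                }
            }
        }
    }
}

Backing off gives MemStoreFlusher.0 and the short-compaction thread seen above time to drain the memstore and shrink the store before the write is retried.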
2024-11-20T23:40:00,199 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 11bd138fc4c82dfa760e3563dd81a50e: 2024-11-20T23:40:00,199 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e., storeName=11bd138fc4c82dfa760e3563dd81a50e/info, priority=13, startTime=1732146000150; duration=0sec 2024-11-20T23:40:00,199 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.8 K, sizeToCheck=16.0 K 2024-11-20T23:40:00,199 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:40:00,199 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a4c43e7e44d74ce68c0004ca37127c5a because midkey is the same as first or last row 2024-11-20T23:40:00,199 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.8 K, sizeToCheck=16.0 K 2024-11-20T23:40:00,199 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:40:00,199 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a4c43e7e44d74ce68c0004ca37127c5a because midkey is the same as first or last row 2024-11-20T23:40:00,199 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.8 K, sizeToCheck=16.0 K 2024-11-20T23:40:00,199 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:40:00,199 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a4c43e7e44d74ce68c0004ca37127c5a because midkey is the same as first or last row 2024-11-20T23:40:00,199 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:00,199 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 11bd138fc4c82dfa760e3563dd81a50e:info 2024-11-20T23:40:00,219 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e., hostname=412a5e44fd2e,37437,1732145986811, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e., hostname=412a5e44fd2e,37437,1732145986811, seqNum=2, 
error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=11bd138fc4c82dfa760e3563dd81a50e, server=412a5e44fd2e,37437,1732145986811 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T23:40:00,219 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e., hostname=412a5e44fd2e,37437,1732145986811, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=11bd138fc4c82dfa760e3563dd81a50e, server=412a5e44fd2e,37437,1732145986811 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T23:40:00,219 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e., hostname=412a5e44fd2e,37437,1732145986811, seqNum=2 because the exception is null or not the one we care about 2024-11-20T23:40:00,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:00,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:00,564 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/a5a507cff99243b99ba540126f5931cd 2024-11-20T23:40:00,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/a5a507cff99243b99ba540126f5931cd as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a5a507cff99243b99ba540126f5931cd 2024-11-20T23:40:00,584 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a5a507cff99243b99ba540126f5931cd, entries=13, sequenceid=60, filesize=18.5 K 2024-11-20T23:40:00,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for 11bd138fc4c82dfa760e3563dd81a50e in 434ms, sequenceid=60, compaction requested=false 2024-11-20T23:40:00,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 11bd138fc4c82dfa760e3563dd81a50e: 2024-11-20T23:40:00,585 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-11-20T23:40:00,585 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:40:00,585 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a4c43e7e44d74ce68c0004ca37127c5a because midkey is the same as first or last row 2024-11-20T23:40:01,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
[The same Close-WAL-Writer-0 WARN from util.RecoverLeaseFSUtils(258), with an identical InvocationTargetException stack trace caused by java.io.IOException: Filesystem closed, recurs roughly once per second for both WAL files (412a5e44fd2e%2C32927%2C1732145843473.1732145843714 and 412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta) from 2024-11-20T23:40:01,490 through 2024-11-20T23:40:09,495. The last occurrence is:]
2024-11-20T23:40:09,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ...
11 more 2024-11-20T23:40:10,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:40:10,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 11bd138fc4c82dfa760e3563dd81a50e 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-20T23:40:10,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/ad6b12ec7ab4416395a16566e42844c6 is 1080, key is row0048/info:/1732146000153/Put/seqid=0 2024-11-20T23:40:10,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741842_1018 (size=23299) 2024-11-20T23:40:10,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741842_1018 (size=23299) 2024-11-20T23:40:10,306 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/ad6b12ec7ab4416395a16566e42844c6 2024-11-20T23:40:10,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/ad6b12ec7ab4416395a16566e42844c6 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/ad6b12ec7ab4416395a16566e42844c6 2024-11-20T23:40:10,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/ad6b12ec7ab4416395a16566e42844c6, entries=17, sequenceid=81, filesize=22.8 K 2024-11-20T23:40:10,319 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=0 B/0 for 11bd138fc4c82dfa760e3563dd81a50e in 23ms, sequenceid=81, compaction requested=true 2024-11-20T23:40:10,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 11bd138fc4c82dfa760e3563dd81a50e: 2024-11-20T23:40:10,319 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=82.1 K, sizeToCheck=16.0 K 2024-11-20T23:40:10,319 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:40:10,320 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a4c43e7e44d74ce68c0004ca37127c5a because midkey is the same as first or last row 2024-11-20T23:40:10,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 11bd138fc4c82dfa760e3563dd81a50e:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-20T23:40:10,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:10,320 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:40:10,321 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84033 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T23:40:10,321 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1541): 11bd138fc4c82dfa760e3563dd81a50e/info is initiating minor compaction (all files) 2024-11-20T23:40:10,321 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 11bd138fc4c82dfa760e3563dd81a50e/info in TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 2024-11-20T23:40:10,321 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a4c43e7e44d74ce68c0004ca37127c5a, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a5a507cff99243b99ba540126f5931cd, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/ad6b12ec7ab4416395a16566e42844c6] into tmpdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp, totalSize=82.1 K 2024-11-20T23:40:10,322 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting a4c43e7e44d74ce68c0004ca37127c5a, keycount=34, bloomtype=ROW, size=40.8 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732145998031 2024-11-20T23:40:10,322 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting a5a507cff99243b99ba540126f5931cd, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732146000107 2024-11-20T23:40:10,323 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting ad6b12ec7ab4416395a16566e42844c6, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732146000153 2024-11-20T23:40:10,337 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 11bd138fc4c82dfa760e3563dd81a50e#info#compaction#62 average throughput is 21.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:40:10,337 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/7a797067eba246aa82883b6778da828e is 1080, key is row0001/info:/1732145998031/Put/seqid=0 2024-11-20T23:40:10,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741843_1019 (size=74301) 2024-11-20T23:40:10,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741843_1019 (size=74301) 2024-11-20T23:40:10,352 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/.tmp/info/7a797067eba246aa82883b6778da828e as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/7a797067eba246aa82883b6778da828e 2024-11-20T23:40:10,360 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 11bd138fc4c82dfa760e3563dd81a50e/info of 11bd138fc4c82dfa760e3563dd81a50e into 7a797067eba246aa82883b6778da828e(size=72.6 K), total size for store is 72.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T23:40:10,360 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 11bd138fc4c82dfa760e3563dd81a50e: 2024-11-20T23:40:10,360 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e., storeName=11bd138fc4c82dfa760e3563dd81a50e/info, priority=13, startTime=1732146010320; duration=0sec 2024-11-20T23:40:10,360 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-20T23:40:10,360 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:40:10,360 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-20T23:40:10,360 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:40:10,360 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-20T23:40:10,360 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T23:40:10,361 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:10,361 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:10,361 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 11bd138fc4c82dfa760e3563dd81a50e:info 2024-11-20T23:40:10,375 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43531 {}] assignment.AssignmentManager(1355): Split request from 412a5e44fd2e,37437,1732145986811, parent={ENCODED => 11bd138fc4c82dfa760e3563dd81a50e, NAME => 'TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-20T23:40:10,381 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43531 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=412a5e44fd2e,37437,1732145986811 2024-11-20T23:40:10,385 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43531 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=11bd138fc4c82dfa760e3563dd81a50e, daughterA=49ae1279d535b3aafde3aebf3e15e8c0, daughterB=0d891293336a03b0c41247dd541de035 2024-11-20T23:40:10,386 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=11bd138fc4c82dfa760e3563dd81a50e, daughterA=49ae1279d535b3aafde3aebf3e15e8c0, daughterB=0d891293336a03b0c41247dd541de035 2024-11-20T23:40:10,386 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=11bd138fc4c82dfa760e3563dd81a50e, daughterA=49ae1279d535b3aafde3aebf3e15e8c0, daughterB=0d891293336a03b0c41247dd541de035 2024-11-20T23:40:10,386 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=11bd138fc4c82dfa760e3563dd81a50e, daughterA=49ae1279d535b3aafde3aebf3e15e8c0, daughterB=0d891293336a03b0c41247dd541de035 2024-11-20T23:40:10,394 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=11bd138fc4c82dfa760e3563dd81a50e, UNASSIGN}] 2024-11-20T23:40:10,396 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=11bd138fc4c82dfa760e3563dd81a50e, UNASSIGN 2024-11-20T23:40:10,398 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=11bd138fc4c82dfa760e3563dd81a50e, regionState=CLOSING, regionLocation=412a5e44fd2e,37437,1732145986811 2024-11-20T23:40:10,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=11bd138fc4c82dfa760e3563dd81a50e, UNASSIGN because future has completed 2024-11-20T23:40:10,401 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-20T23:40:10,401 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 11bd138fc4c82dfa760e3563dd81a50e, server=412a5e44fd2e,37437,1732145986811}] 2024-11-20T23:40:10,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:10,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:10,559 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:40:10,559 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-20T23:40:10,560 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 11bd138fc4c82dfa760e3563dd81a50e, disabling compactions & flushes 2024-11-20T23:40:10,560 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 2024-11-20T23:40:10,560 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 2024-11-20T23:40:10,560 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. after waiting 0 ms 2024-11-20T23:40:10,560 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 
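
[Editor's note] The repeated "util.RecoverLeaseFSUtils(258): Failed invocation ... java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warnings above follow a common pattern: the isFileClosed() check is performed reflectively, and Method.invoke() wraps whatever the target method throws in an InvocationTargetException, so a DFSClient that was already shut down during test teardown surfaces as the nested "Filesystem closed" cause. The sketch below is illustrative only (it is not the HBase RecoverLeaseFSUtils code); it uses only the public Hadoop FileSystem/Path API plus standard reflection, and the class and method names here are made up for the example.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative probe: calls isFileClosed(Path) reflectively, because only
    // DistributedFileSystem exposes it, and shows why a closed client appears as
    // InvocationTargetException caused by IOException("Filesystem closed").
    public final class IsFileClosedProbe {

      private IsFileClosedProbe() {}

      public static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException e) {
          return false; // this FileSystem implementation has no isFileClosed()
        } catch (InvocationTargetException e) {
          // Method.invoke wraps the target's exception; with an already-closed
          // DFSClient the cause is "Filesystem closed", as in the WARN entries above.
          System.err.println("Failed invocation for " + path + ": " + e.getCause());
          return false;
        } catch (IllegalAccessException e) {
          return false;
        }
      }

      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        fs.close(); // simulate the shut-down client seen during test teardown
        System.out.println(isFileClosed(fs, new Path("/tmp/example-wal")));
      }
    }

Run with fs.defaultFS pointing at an hdfs:// cluster this reproduces the wrapped "Filesystem closed" cause; against a local filesystem the method is simply absent and the probe returns false.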
2024-11-20T23:40:10,561 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/56c3444d613149baa1b0f6a3426b2f49, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/db316cd3b4f54280833821eeac9a3fab, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a4c43e7e44d74ce68c0004ca37127c5a, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/f1ddc137f6f14e179cbf70577c740cbc, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a5a507cff99243b99ba540126f5931cd, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/ad6b12ec7ab4416395a16566e42844c6] to archive 2024-11-20T23:40:10,562 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T23:40:10,564 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/56c3444d613149baa1b0f6a3426b2f49 to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/56c3444d613149baa1b0f6a3426b2f49 2024-11-20T23:40:10,567 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/db316cd3b4f54280833821eeac9a3fab to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/db316cd3b4f54280833821eeac9a3fab 2024-11-20T23:40:10,568 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a4c43e7e44d74ce68c0004ca37127c5a to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a4c43e7e44d74ce68c0004ca37127c5a 2024-11-20T23:40:10,570 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/f1ddc137f6f14e179cbf70577c740cbc to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/f1ddc137f6f14e179cbf70577c740cbc 2024-11-20T23:40:10,571 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a5a507cff99243b99ba540126f5931cd to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/a5a507cff99243b99ba540126f5931cd 2024-11-20T23:40:10,573 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/ad6b12ec7ab4416395a16566e42844c6 to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/ad6b12ec7ab4416395a16566e42844c6 2024-11-20T23:40:10,580 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=1 2024-11-20T23:40:10,581 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 
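
[Editor's note] The backup.HFileArchiver(596) entries above move each compacted store file from the data directory to a mirrored location under the archive directory (.../data/default/<table>/<region>/<cf>/<file> becomes .../archive/data/default/<table>/<region>/<cf>/<file>). The helper below is a hypothetical sketch of that move using the public Hadoop FileSystem API; it is not HBase's HFileArchiver (which also handles collisions and retries), and the class and method names are invented for the example.

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical helper: relocate a compacted store file into the archive tree,
    // mirroring the table/region/column-family layout shown in the log paths above.
    final class StoreFileArchiveSketch {

      private StoreFileArchiveSketch() {}

      static Path archivePathFor(Path rootDir, Path storeFile) {
        Path cfDir = storeFile.getParent();
        Path regionDir = cfDir.getParent();
        Path tableDir = regionDir.getParent();
        return new Path(rootDir,
            "archive/data/default/" + tableDir.getName() + "/" + regionDir.getName()
                + "/" + cfDir.getName() + "/" + storeFile.getName());
      }

      static void archive(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        Path target = archivePathFor(rootDir, storeFile);
        fs.mkdirs(target.getParent());
        if (!fs.rename(storeFile, target)) { // same-filesystem move, no data copy
          throw new IOException("Failed to archive " + storeFile + " to " + target);
        }
      }
    }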
2024-11-20T23:40:10,581 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 11bd138fc4c82dfa760e3563dd81a50e: Waiting for close lock at 1732146010560Running coprocessor pre-close hooks at 1732146010560Disabling compacts and flushes for region at 1732146010560Disabling writes for close at 1732146010560Writing region close event to WAL at 1732146010575 (+15 ms)Running coprocessor post-close hooks at 1732146010581 (+6 ms)Closed at 1732146010581 2024-11-20T23:40:10,584 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:40:10,585 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=11bd138fc4c82dfa760e3563dd81a50e, regionState=CLOSED 2024-11-20T23:40:10,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 11bd138fc4c82dfa760e3563dd81a50e, server=412a5e44fd2e,37437,1732145986811 because future has completed 2024-11-20T23:40:10,591 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-20T23:40:10,591 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 11bd138fc4c82dfa760e3563dd81a50e, server=412a5e44fd2e,37437,1732145986811 in 187 msec 2024-11-20T23:40:10,593 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-20T23:40:10,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=11bd138fc4c82dfa760e3563dd81a50e, UNASSIGN in 197 msec 2024-11-20T23:40:10,608 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:10,612 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 1 storefiles, region=11bd138fc4c82dfa760e3563dd81a50e, threads=1 2024-11-20T23:40:10,615 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/7a797067eba246aa82883b6778da828e for region: 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:40:10,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741844_1020 (size=27) 2024-11-20T23:40:10,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741844_1020 (size=27) 2024-11-20T23:40:11,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741845_1021 (size=27) 2024-11-20T23:40:11,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741845_1021 (size=27) 2024-11-20T23:40:11,062 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 
splitting complete for store file: hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/7a797067eba246aa82883b6778da828e for region: 11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:40:11,065 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 11bd138fc4c82dfa760e3563dd81a50e Daughter A: [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0/info/7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e] storefiles, Daughter B: [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e] storefiles. 2024-11-20T23:40:11,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741846_1022 (size=71) 2024-11-20T23:40:11,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741846_1022 (size=71) 2024-11-20T23:40:11,076 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:11,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741847_1023 (size=71) 2024-11-20T23:40:11,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741847_1023 (size=71) 2024-11-20T23:40:11,093 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:11,102 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-20T23:40:11,104 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-20T23:40:11,106 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732146011106"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732146011106"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732146011106"}]},"ts":"1732146011106"} 2024-11-20T23:40:11,107 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732146011106"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732146011106"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732146011106"}]},"ts":"1732146011106"} 
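
[Editor's note] The "split storefiles ... Daughter A ... Daughter B" entry above shows that each daughter region starts out with a reference file named "<hfileName>.<parentEncodedRegionName>" (here 7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e) rather than a copy of the data; the later StoreEngine(278) "loaded ... -> ...-top / ...-bottom" lines show those references resolving back to halves of the parent's HFile. The tiny parser below only illustrates how such a file name decomposes; real reference resolution (top/bottom half readers) is HBase-internal, and this class is invented for the example.

    // Illustrative only: decompose a daughter-region reference file name of the form
    // "<hfileName>.<parentEncodedRegionName>" and rebuild the parent store file path.
    final class ReferenceFileName {
      final String hfileName;
      final String parentEncodedRegionName;

      private ReferenceFileName(String hfileName, String parentEncodedRegionName) {
        this.hfileName = hfileName;
        this.parentEncodedRegionName = parentEncodedRegionName;
      }

      static ReferenceFileName parse(String fileName) {
        int dot = fileName.indexOf('.');
        if (dot <= 0 || dot == fileName.length() - 1) {
          throw new IllegalArgumentException("Not a reference file name: " + fileName);
        }
        return new ReferenceFileName(fileName.substring(0, dot), fileName.substring(dot + 1));
      }

      /** Parent store file: <root>/data/default/<table>/<parentRegion>/<cf>/<hfileName>. */
      String parentStoreFile(String rootDir, String table, String columnFamily) {
        return rootDir + "/data/default/" + table + "/" + parentEncodedRegionName
            + "/" + columnFamily + "/" + hfileName;
      }
    }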
2024-11-20T23:40:11,107 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732146011106"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732146011106"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732146011106"}]},"ts":"1732146011106"} 2024-11-20T23:40:11,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ae1279d535b3aafde3aebf3e15e8c0, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d891293336a03b0c41247dd541de035, ASSIGN}] 2024-11-20T23:40:11,130 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d891293336a03b0c41247dd541de035, ASSIGN 2024-11-20T23:40:11,130 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ae1279d535b3aafde3aebf3e15e8c0, ASSIGN 2024-11-20T23:40:11,131 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d891293336a03b0c41247dd541de035, ASSIGN; state=SPLITTING_NEW, location=412a5e44fd2e,37437,1732145986811; forceNewPlan=false, retain=false 2024-11-20T23:40:11,131 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ae1279d535b3aafde3aebf3e15e8c0, ASSIGN; state=SPLITTING_NEW, location=412a5e44fd2e,37437,1732145986811; forceNewPlan=false, retain=false 2024-11-20T23:40:11,281 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=49ae1279d535b3aafde3aebf3e15e8c0, regionState=OPENING, regionLocation=412a5e44fd2e,37437,1732145986811 2024-11-20T23:40:11,281 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=0d891293336a03b0c41247dd541de035, regionState=OPENING, regionLocation=412a5e44fd2e,37437,1732145986811 2024-11-20T23:40:11,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ae1279d535b3aafde3aebf3e15e8c0, ASSIGN because future has completed 2024-11-20T23:40:11,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 49ae1279d535b3aafde3aebf3e15e8c0, server=412a5e44fd2e,37437,1732145986811}] 2024-11-20T23:40:11,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d891293336a03b0c41247dd541de035, ASSIGN because future has completed 2024-11-20T23:40:11,286 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0d891293336a03b0c41247dd541de035, server=412a5e44fd2e,37437,1732145986811}] 2024-11-20T23:40:11,441 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:11,441 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 0d891293336a03b0c41247dd541de035, NAME => 'TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-20T23:40:11,441 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:11,441 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:40:11,441 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:11,441 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:11,443 INFO [StoreOpener-0d891293336a03b0c41247dd541de035-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:11,443 INFO [StoreOpener-0d891293336a03b0c41247dd541de035-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d891293336a03b0c41247dd541de035 columnFamilyName info 2024-11-20T23:40:11,443 DEBUG [StoreOpener-0d891293336a03b0c41247dd541de035-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T23:40:11,452 DEBUG [StoreOpener-0d891293336a03b0c41247dd541de035-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e->hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/7a797067eba246aa82883b6778da828e-top 2024-11-20T23:40:11,453 INFO [StoreOpener-0d891293336a03b0c41247dd541de035-1 {}] regionserver.HStore(327): Store=0d891293336a03b0c41247dd541de035/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:40:11,453 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:11,453 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035 2024-11-20T23:40:11,455 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035 2024-11-20T23:40:11,455 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:11,455 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:11,456 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:11,457 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 0d891293336a03b0c41247dd541de035; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=760457, jitterRate=-0.033030033111572266}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T23:40:11,457 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:11,458 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 0d891293336a03b0c41247dd541de035: Running coprocessor pre-open hook at 1732146011441Writing region info on filesystem at 1732146011441Initializing all the Stores at 1732146011442 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732146011442Cleaning up temporary data from old regions at 1732146011455 (+13 ms)Running coprocessor post-open hooks at 1732146011457 (+2 ms)Region opened successfully at 1732146011458 (+1 ms) 2024-11-20T23:40:11,459 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035., pid=13, masterSystemTime=1732146011438 2024-11-20T23:40:11,459 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 0d891293336a03b0c41247dd541de035:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T23:40:11,459 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-20T23:40:11,459 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:11,459 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:11,459 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1541): 0d891293336a03b0c41247dd541de035/info is initiating minor compaction (all files) 2024-11-20T23:40:11,460 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0d891293336a03b0c41247dd541de035/info in TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:11,460 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e->hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/7a797067eba246aa82883b6778da828e-top] into tmpdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp, totalSize=72.6 K 2024-11-20T23:40:11,460 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732145998031 2024-11-20T23:40:11,461 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 
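
[Editor's note] Several entries above report "Should split because region size is big enough sumSize=..., sizeToCheck=16.0 K" together with "regionsWithCommonTable=1" and, on region open, an IncreasingToUpperBoundRegionSplitPolicy with initialSize=16384 and a jittered desiredMaxFileSize. The sketch below shows the arithmetic those lines imply under that reading: the threshold grows with the cube of the table's region count on the server, capped at the configured max file size, and the region is split-eligible once its summed store file size exceeds it (min(desiredMaxFileSize, 16384 * 1^3) = 16.0 K in this log). Class and method names are invented; this is not the HBase split-policy code and the jitter handling is omitted.

    import java.util.List;

    // Hypothetical sketch of the size-based split check suggested by the log lines above.
    final class SplitSizeCheck {

      private SplitSizeCheck() {}

      static long sizeToCheck(long initialSize, int regionsWithCommonTable, long desiredMaxFileSize) {
        long n = regionsWithCommonTable;
        // Threshold grows with the cube of the region count, capped at the max file size.
        return Math.min(desiredMaxFileSize, initialSize * n * n * n);
      }

      static boolean shouldSplit(List<Long> storeFileSizes, long sizeToCheck) {
        long sumSize = storeFileSizes.stream().mapToLong(Long::longValue).sum();
        return sumSize > sizeToCheck; // e.g. 72.6 K > 16.0 K in the entries above
      }
    }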
2024-11-20T23:40:11,461 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:11,461 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0. 2024-11-20T23:40:11,461 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 49ae1279d535b3aafde3aebf3e15e8c0, NAME => 'TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-20T23:40:11,462 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 49ae1279d535b3aafde3aebf3e15e8c0 2024-11-20T23:40:11,462 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:40:11,462 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 49ae1279d535b3aafde3aebf3e15e8c0 2024-11-20T23:40:11,462 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 49ae1279d535b3aafde3aebf3e15e8c0 2024-11-20T23:40:11,462 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=0d891293336a03b0c41247dd541de035, regionState=OPEN, openSeqNum=86, regionLocation=412a5e44fd2e,37437,1732145986811 2024-11-20T23:40:11,463 INFO [StoreOpener-49ae1279d535b3aafde3aebf3e15e8c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 49ae1279d535b3aafde3aebf3e15e8c0 2024-11-20T23:40:11,464 INFO [StoreOpener-49ae1279d535b3aafde3aebf3e15e8c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 49ae1279d535b3aafde3aebf3e15e8c0 columnFamilyName info 2024-11-20T23:40:11,464 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-20T23:40:11,464 DEBUG [StoreOpener-49ae1279d535b3aafde3aebf3e15e8c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:11,464 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-11-20T23:40:11,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-20T23:40:11,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0d891293336a03b0c41247dd541de035, server=412a5e44fd2e,37437,1732145986811 because future has completed 2024-11-20T23:40:11,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-20T23:40:11,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 0d891293336a03b0c41247dd541de035, server=412a5e44fd2e,37437,1732145986811 in 180 msec 2024-11-20T23:40:11,471 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d891293336a03b0c41247dd541de035, ASSIGN in 341 msec 2024-11-20T23:40:11,473 DEBUG [StoreOpener-49ae1279d535b3aafde3aebf3e15e8c0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0/info/7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e->hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/7a797067eba246aa82883b6778da828e-bottom 2024-11-20T23:40:11,474 INFO [StoreOpener-49ae1279d535b3aafde3aebf3e15e8c0-1 {}] regionserver.HStore(327): Store=49ae1279d535b3aafde3aebf3e15e8c0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:40:11,474 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 49ae1279d535b3aafde3aebf3e15e8c0 2024-11-20T23:40:11,475 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0 2024-11-20T23:40:11,476 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0 2024-11-20T23:40:11,476 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 49ae1279d535b3aafde3aebf3e15e8c0 2024-11-20T23:40:11,476 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 49ae1279d535b3aafde3aebf3e15e8c0 2024-11-20T23:40:11,477 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 
{event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 49ae1279d535b3aafde3aebf3e15e8c0 2024-11-20T23:40:11,478 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 49ae1279d535b3aafde3aebf3e15e8c0; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692164, jitterRate=-0.11986859142780304}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T23:40:11,478 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 49ae1279d535b3aafde3aebf3e15e8c0 2024-11-20T23:40:11,478 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 49ae1279d535b3aafde3aebf3e15e8c0: Running coprocessor pre-open hook at 1732146011462Writing region info on filesystem at 1732146011462Initializing all the Stores at 1732146011463 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732146011463Cleaning up temporary data from old regions at 1732146011476 (+13 ms)Running coprocessor post-open hooks at 1732146011478 (+2 ms)Region opened successfully at 1732146011478 2024-11-20T23:40:11,479 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d891293336a03b0c41247dd541de035#info#compaction#63 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:40:11,479 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0., pid=12, masterSystemTime=1732146011438 2024-11-20T23:40:11,479 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 49ae1279d535b3aafde3aebf3e15e8c0:info, priority=-2147483648, current under compaction store size is 2 2024-11-20T23:40:11,479 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:11,479 DEBUG [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-20T23:40:11,479 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/4af20445003746b692921f8dbf152a1d is 1080, key is row0062/info:/1732146000185/Put/seqid=0 2024-11-20T23:40:11,480 INFO [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0. 2024-11-20T23:40:11,480 DEBUG [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] regionserver.HStore(1541): 49ae1279d535b3aafde3aebf3e15e8c0/info is initiating minor compaction (all files) 2024-11-20T23:40:11,480 INFO [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ae1279d535b3aafde3aebf3e15e8c0/info in TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0. 2024-11-20T23:40:11,480 INFO [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0/info/7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e->hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/7a797067eba246aa82883b6778da828e-bottom] into tmpdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0/.tmp, totalSize=72.6 K 2024-11-20T23:40:11,481 DEBUG [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] compactions.Compactor(225): Compacting 7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732145998031 2024-11-20T23:40:11,481 DEBUG [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0. 
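
[Editor's note] The compaction selections in this log ("Exploring compaction algorithm has selected 3 files of size 84033 ... with 1 in ratio", with CompactionConfiguration reporting minFilesToCompact:3, maxFilesToCompact:10 and ratio 1.2) follow a ratio rule: a candidate window qualifies when no file in it is larger than `ratio` times the combined size of the other files, and among qualifying windows the one with more files (then smaller total size) wins. The sketch below is a rough, invented illustration of that rule, not the actual ExploringCompactionPolicy code; applied to the three file sizes compacted earlier (~40.8 K, ~18.5 K, ~22.8 K) it would likewise accept all three at ratio 1.2.

    import java.util.ArrayList;
    import java.util.List;

    // Rough sketch of ratio-based compaction file selection over contiguous windows
    // of store-file sizes (sizes assumed ordered by sequence id, as in the log).
    final class CompactionWindowSketch {

      private CompactionWindowSketch() {}

      static boolean inRatio(List<Long> windowSizes, double ratio) {
        long total = windowSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : windowSizes) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      /** Best qualifying window: most files, then smallest total size; empty if none. */
      static List<Long> select(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestSize = Long.MAX_VALUE;
        for (int start = 0; start < sizes.size(); start++) {
          for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
            List<Long> window = sizes.subList(start, end);
            long size = window.stream().mapToLong(Long::longValue).sum();
            boolean better = window.size() > best.size()
                || (window.size() == best.size() && size < bestSize);
            if (better && inRatio(window, ratio)) {
              best = new ArrayList<>(window);
              bestSize = size;
            }
          }
        }
        return best;
      }
    }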
2024-11-20T23:40:11,481 INFO [RS_OPEN_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0. 2024-11-20T23:40:11,482 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=49ae1279d535b3aafde3aebf3e15e8c0, regionState=OPEN, openSeqNum=86, regionLocation=412a5e44fd2e,37437,1732145986811 2024-11-20T23:40:11,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 49ae1279d535b3aafde3aebf3e15e8c0, server=412a5e44fd2e,37437,1732145986811 because future has completed 2024-11-20T23:40:11,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/.tmp/info/25cfc21d337745cf9c88675d909d29a1 is 193, key is TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035./info:regioninfo/1732146011462/Put/seqid=0 2024-11-20T23:40:11,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741848_1024 (size=8260) 2024-11-20T23:40:11,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741848_1024 (size=8260) 2024-11-20T23:40:11,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-20T23:40:11,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 49ae1279d535b3aafde3aebf3e15e8c0, server=412a5e44fd2e,37437,1732145986811 in 204 msec 2024-11-20T23:40:11,493 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-20T23:40:11,494 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ae1279d535b3aafde3aebf3e15e8c0, ASSIGN in 364 msec 2024-11-20T23:40:11,495 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=11bd138fc4c82dfa760e3563dd81a50e, daughterA=49ae1279d535b3aafde3aebf3e15e8c0, daughterB=0d891293336a03b0c41247dd541de035 in 1.1130 sec 2024-11-20T23:40:11,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:11,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:11,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741849_1025 (size=9882) 2024-11-20T23:40:11,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741849_1025 (size=9882) 2024-11-20T23:40:11,505 INFO [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ae1279d535b3aafde3aebf3e15e8c0#info#compaction#65 average throughput is 15.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:40:11,506 DEBUG [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0/.tmp/info/86f9c01ce7784da8ad7dd8616bcaf279 is 1080, key is row0001/info:/1732145998031/Put/seqid=0 2024-11-20T23:40:11,506 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/4af20445003746b692921f8dbf152a1d as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/4af20445003746b692921f8dbf152a1d 2024-11-20T23:40:11,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741850_1026 (size=70862) 2024-11-20T23:40:11,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741850_1026 (size=70862) 2024-11-20T23:40:11,512 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-20T23:40:11,513 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 0d891293336a03b0c41247dd541de035/info of 0d891293336a03b0c41247dd541de035 into 4af20445003746b692921f8dbf152a1d(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T23:40:11,513 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:11,513 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035., storeName=0d891293336a03b0c41247dd541de035/info, priority=15, startTime=1732146011459; duration=0sec 2024-11-20T23:40:11,513 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:11,513 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d891293336a03b0c41247dd541de035:info 2024-11-20T23:40:11,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/.tmp/info/25cfc21d337745cf9c88675d909d29a1 2024-11-20T23:40:11,921 DEBUG [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0/.tmp/info/86f9c01ce7784da8ad7dd8616bcaf279 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0/info/86f9c01ce7784da8ad7dd8616bcaf279 2024-11-20T23:40:11,927 INFO [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 49ae1279d535b3aafde3aebf3e15e8c0/info of 49ae1279d535b3aafde3aebf3e15e8c0 into 86f9c01ce7784da8ad7dd8616bcaf279(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T23:40:11,927 DEBUG [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ae1279d535b3aafde3aebf3e15e8c0: 2024-11-20T23:40:11,927 INFO [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0., storeName=49ae1279d535b3aafde3aebf3e15e8c0/info, priority=15, startTime=1732146011479; duration=0sec 2024-11-20T23:40:11,927 DEBUG [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:11,927 DEBUG [RS:0;412a5e44fd2e:37437-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ae1279d535b3aafde3aebf3e15e8c0:info 2024-11-20T23:40:11,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/.tmp/ns/29701d112648477ebbb62b95645a5023 is 43, key is default/ns:d/1732145987876/Put/seqid=0 2024-11-20T23:40:11,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741851_1027 (size=5153) 2024-11-20T23:40:11,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741851_1027 (size=5153) 2024-11-20T23:40:11,937 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/.tmp/ns/29701d112648477ebbb62b95645a5023 2024-11-20T23:40:11,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/.tmp/table/d647ed36afa04e4c8a184309c0c6982e is 65, key is TestLogRolling-testLogRolling/table:state/1732145988317/Put/seqid=0 2024-11-20T23:40:11,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741852_1028 (size=5340) 2024-11-20T23:40:11,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741852_1028 (size=5340) 2024-11-20T23:40:11,961 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/.tmp/table/d647ed36afa04e4c8a184309c0c6982e 2024-11-20T23:40:11,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/.tmp/info/25cfc21d337745cf9c88675d909d29a1 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/info/25cfc21d337745cf9c88675d909d29a1 2024-11-20T23:40:11,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/info/25cfc21d337745cf9c88675d909d29a1, 
entries=30, sequenceid=17, filesize=9.7 K 2024-11-20T23:40:11,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/.tmp/ns/29701d112648477ebbb62b95645a5023 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/ns/29701d112648477ebbb62b95645a5023 2024-11-20T23:40:11,977 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/ns/29701d112648477ebbb62b95645a5023, entries=2, sequenceid=17, filesize=5.0 K 2024-11-20T23:40:11,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/.tmp/table/d647ed36afa04e4c8a184309c0c6982e as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/table/d647ed36afa04e4c8a184309c0c6982e 2024-11-20T23:40:11,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/table/d647ed36afa04e4c8a184309c0c6982e, entries=2, sequenceid=17, filesize=5.2 K 2024-11-20T23:40:11,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 520ms, sequenceid=17, compaction requested=false 2024-11-20T23:40:11,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-20T23:40:12,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34318 deadline: 1732146022297, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. is not online on 412a5e44fd2e,37437,1732145986811 2024-11-20T23:40:12,299 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e., hostname=412a5e44fd2e,37437,1732145986811, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e., hostname=412a5e44fd2e,37437,1732145986811, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. 
is not online on 412a5e44fd2e,37437,1732145986811 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T23:40:12,299 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e., hostname=412a5e44fd2e,37437,1732145986811, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e. is not online on 412a5e44fd2e,37437,1732145986811 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T23:40:12,299 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732145987954.11bd138fc4c82dfa760e3563dd81a50e., hostname=412a5e44fd2e,37437,1732145986811, seqNum=2 from cache 2024-11-20T23:40:12,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:12,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:40:13,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:13,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:14,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:40:14,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:15,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:15,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:15,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:15,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,114 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T23:40:16,116 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,116 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,117 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,119 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,119 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,120 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,121 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,145 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,145 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,145 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:16,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:16,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:16,620 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T23:40:17,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:17,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:18,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:18,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:19,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:19,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:20,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:20,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:21,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:21,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:22,382 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035., hostname=412a5e44fd2e,37437,1732145986811, seqNum=86] 2024-11-20T23:40:22,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:22,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T23:40:22,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/fd973901f0e1460dae23cbdb99abcfdb is 1080, key is row0065/info:/1732146022383/Put/seqid=0 2024-11-20T23:40:22,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741853_1029 (size=12509) 2024-11-20T23:40:22,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741853_1029 (size=12509) 2024-11-20T23:40:22,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/fd973901f0e1460dae23cbdb99abcfdb 2024-11-20T23:40:22,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/fd973901f0e1460dae23cbdb99abcfdb as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/fd973901f0e1460dae23cbdb99abcfdb 2024-11-20T23:40:22,419 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/fd973901f0e1460dae23cbdb99abcfdb, entries=7, sequenceid=96, filesize=12.2 K 2024-11-20T23:40:22,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 0d891293336a03b0c41247dd541de035 in 24ms, sequenceid=96, compaction requested=false 2024-11-20T23:40:22,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 
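The Close-WAL-Writer-0 warnings that repeat above report java.lang.reflect.InvocationTargetException while the actual failure is only visible in the "Caused by: java.io.IOException: Filesystem closed" frames. That wrapping is standard reflection behaviour: Method.invoke rethrows whatever the target method throws inside an InvocationTargetException, so the logged message is null and the real error has to be read from getCause(). A minimal, self-contained Java sketch of that mechanism (ClosedFs and its isFileClosed signature are illustrative stand-ins, not the HDFS or HBase API):

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveInvokeDemo {
        // Stand-in for a client whose filesystem has already been shut down.
        static class ClosedFs {
            public boolean isFileClosed(String path) throws IOException {
                throw new IOException("Filesystem closed");
            }
        }

        public static void main(String[] args) throws Exception {
            ClosedFs fs = new ClosedFs();
            Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(fs, "/some/wal/file");
            } catch (InvocationTargetException e) {
                // Reflection wraps the callee's checked exception; the real error
                // is only reachable via getCause(), which is why the log shows
                // "InvocationTargetException: null ... Caused by: IOException".
                System.out.println("wrapper: " + e);
                System.out.println("cause:   " + e.getCause());
            }
        }
    }

Running it prints the InvocationTargetException wrapper first and the wrapped "Filesystem closed" IOException as its cause, mirroring the shape of the WARN entries above.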
2024-11-20T23:40:22,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:22,421 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-20T23:40:22,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/07506591607c4b15888c0ca29a7d5a2d is 1080, key is row0072/info:/1732146022398/Put/seqid=0 2024-11-20T23:40:22,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741854_1030 (size=16817) 2024-11-20T23:40:22,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741854_1030 (size=16817) 2024-11-20T23:40:22,445 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=110 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/07506591607c4b15888c0ca29a7d5a2d 2024-11-20T23:40:22,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/07506591607c4b15888c0ca29a7d5a2d as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/07506591607c4b15888c0ca29a7d5a2d 2024-11-20T23:40:22,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/07506591607c4b15888c0ca29a7d5a2d, entries=11, sequenceid=110, filesize=16.4 K 2024-11-20T23:40:22,460 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=14.71 KB/15064 for 0d891293336a03b0c41247dd541de035 in 38ms, sequenceid=110, compaction requested=true 2024-11-20T23:40:22,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:22,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d891293336a03b0c41247dd541de035:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T23:40:22,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:22,460 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:40:22,461 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37586 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-20T23:40:22,461 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1541): 0d891293336a03b0c41247dd541de035/info is initiating minor compaction (all files) 2024-11-20T23:40:22,461 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0d891293336a03b0c41247dd541de035/info in TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:22,461 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/4af20445003746b692921f8dbf152a1d, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/fd973901f0e1460dae23cbdb99abcfdb, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/07506591607c4b15888c0ca29a7d5a2d] into tmpdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp, totalSize=36.7 K 2024-11-20T23:40:22,462 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4af20445003746b692921f8dbf152a1d, keycount=3, bloomtype=ROW, size=8.1 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732146000185 2024-11-20T23:40:22,462 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting fd973901f0e1460dae23cbdb99abcfdb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732146022383 2024-11-20T23:40:22,463 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 07506591607c4b15888c0ca29a7d5a2d, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1732146022398 2024-11-20T23:40:22,474 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d891293336a03b0c41247dd541de035#info#compaction#70 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:40:22,475 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/d730df1a49ff4d34852de2ee49dca4aa is 1080, key is row0062/info:/1732146000185/Put/seqid=0 2024-11-20T23:40:22,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741855_1031 (size=27778) 2024-11-20T23:40:22,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741855_1031 (size=27778) 2024-11-20T23:40:22,491 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/d730df1a49ff4d34852de2ee49dca4aa as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/d730df1a49ff4d34852de2ee49dca4aa 2024-11-20T23:40:22,498 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0d891293336a03b0c41247dd541de035/info of 0d891293336a03b0c41247dd541de035 into d730df1a49ff4d34852de2ee49dca4aa(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T23:40:22,498 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:22,498 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035., storeName=0d891293336a03b0c41247dd541de035/info, priority=13, startTime=1732146022460; duration=0sec 2024-11-20T23:40:22,498 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:22,498 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d891293336a03b0c41247dd541de035:info 2024-11-20T23:40:22,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:22,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:23,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:23,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:24,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:24,460 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-20T23:40:24,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/a90eba00562846dd971f6f809abf77ea is 1080, key is row0083/info:/1732146022422/Put/seqid=0 2024-11-20T23:40:24,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741856_1032 (size=21142) 2024-11-20T23:40:24,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741856_1032 (size=21142) 2024-11-20T23:40:24,472 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/a90eba00562846dd971f6f809abf77ea 2024-11-20T23:40:24,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/a90eba00562846dd971f6f809abf77ea as 
hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/a90eba00562846dd971f6f809abf77ea 2024-11-20T23:40:24,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/a90eba00562846dd971f6f809abf77ea, entries=15, sequenceid=129, filesize=20.6 K 2024-11-20T23:40:24,486 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for 0d891293336a03b0c41247dd541de035 in 27ms, sequenceid=129, compaction requested=false 2024-11-20T23:40:24,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:24,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:24,489 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-20T23:40:24,493 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/aa5dac8b3db34e478df78614250e2faa is 1080, key is row0098/info:/1732146024461/Put/seqid=0 2024-11-20T23:40:24,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741857_1033 (size=17906) 2024-11-20T23:40:24,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741857_1033 (size=17906) 2024-11-20T23:40:24,503 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=144 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/aa5dac8b3db34e478df78614250e2faa 2024-11-20T23:40:24,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:24,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:24,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/aa5dac8b3db34e478df78614250e2faa as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/aa5dac8b3db34e478df78614250e2faa 2024-11-20T23:40:24,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/aa5dac8b3db34e478df78614250e2faa, entries=12, sequenceid=144, filesize=17.5 K 2024-11-20T23:40:24,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for 0d891293336a03b0c41247dd541de035 in 26ms, sequenceid=144, compaction requested=true 2024-11-20T23:40:24,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:24,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d891293336a03b0c41247dd541de035:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T23:40:24,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:24,516 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:40:24,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:24,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-20T23:40:24,517 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 66826 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T23:40:24,517 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1541): 0d891293336a03b0c41247dd541de035/info is initiating minor compaction (all files) 2024-11-20T23:40:24,517 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0d891293336a03b0c41247dd541de035/info in TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 
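The flush entries above ("Committing ... .tmp/info/<file> as .../info/<file>" followed by "Added ...") reflect a write-then-publish pattern: the new file is first written under the store's .tmp directory and only afterwards moved into info/, so readers never observe a partially written file. A minimal local-filesystem sketch of that general pattern using java.nio.file (directory names and contents are invented for illustration; the commit in the log above actually goes through regionserver.HRegionFileSystem on HDFS):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class TmpThenCommitSketch {
        public static void main(String[] args) throws IOException {
            Path storeDir = Files.createTempDirectory("info");
            Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

            // 1. Write the new file somewhere readers never look.
            Path tmpFile = tmpDir.resolve("flush-candidate");
            Files.write(tmpFile, "flushed cells".getBytes(StandardCharsets.UTF_8));

            // 2. Publish it with a single move so readers only ever see a
            //    complete file (atomic where the filesystem supports it).
            Path finalFile = storeDir.resolve("flush-candidate");
            Files.move(tmpFile, finalFile, StandardCopyOption.ATOMIC_MOVE);

            System.out.println("committed " + finalFile);
        }
    }

The single move in step 2 is what makes the publish effectively all-or-nothing from a reader's point of view.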
2024-11-20T23:40:24,517 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/d730df1a49ff4d34852de2ee49dca4aa, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/a90eba00562846dd971f6f809abf77ea, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/aa5dac8b3db34e478df78614250e2faa] into tmpdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp, totalSize=65.3 K 2024-11-20T23:40:24,518 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting d730df1a49ff4d34852de2ee49dca4aa, keycount=21, bloomtype=ROW, size=27.1 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1732146000185 2024-11-20T23:40:24,519 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting a90eba00562846dd971f6f809abf77ea, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732146022422 2024-11-20T23:40:24,519 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting aa5dac8b3db34e478df78614250e2faa, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732146024461 2024-11-20T23:40:24,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/92b713893da440e099582584d3671531 is 1080, key is row0110/info:/1732146024490/Put/seqid=0 2024-11-20T23:40:24,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741858_1034 (size=17906) 2024-11-20T23:40:24,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741858_1034 (size=17906) 2024-11-20T23:40:24,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/92b713893da440e099582584d3671531 2024-11-20T23:40:24,534 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d891293336a03b0c41247dd541de035#info#compaction#74 average throughput is 24.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:40:24,534 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/ea226fe4bc70407681e4c9fcc2d05f28 is 1080, key is row0062/info:/1732146000185/Put/seqid=0 2024-11-20T23:40:24,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/92b713893da440e099582584d3671531 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/92b713893da440e099582584d3671531 2024-11-20T23:40:24,540 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/92b713893da440e099582584d3671531, entries=12, sequenceid=159, filesize=17.5 K 2024-11-20T23:40:24,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for 0d891293336a03b0c41247dd541de035 in 25ms, sequenceid=159, compaction requested=false 2024-11-20T23:40:24,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:24,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741859_1035 (size=57012) 2024-11-20T23:40:24,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741859_1035 (size=57012) 2024-11-20T23:40:24,548 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/ea226fe4bc70407681e4c9fcc2d05f28 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/ea226fe4bc70407681e4c9fcc2d05f28 2024-11-20T23:40:24,555 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0d891293336a03b0c41247dd541de035/info of 0d891293336a03b0c41247dd541de035 into ea226fe4bc70407681e4c9fcc2d05f28(size=55.7 K), total size for store is 73.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
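For the second compaction above, the three selected store files are the ones whose block sizes were reported earlier: 27778, 21142 and 17906 bytes. Their sum is 66826 bytes, roughly 65.3 K, which matches both "selected 3 files of size 66826" and "totalSize=65.3 K" in the log. A simplified bookkeeping sketch of that selection (the three-file threshold is an assumption made for the sketch; the real ExploringCompactionPolicy also weighs file ratios and permutations, which is not reproduced here):

    import java.util.List;

    public class CompactionSelectionSketch {
        // Hypothetical store-file descriptor; only the size matters here.
        record StoreFile(String name, long sizeBytes) {}

        static long totalSize(List<StoreFile> selected) {
            return selected.stream().mapToLong(StoreFile::sizeBytes).sum();
        }

        public static void main(String[] args) {
            List<StoreFile> eligible = List.of(
                    new StoreFile("d730df1a49ff4d34852de2ee49dca4aa", 27778),
                    new StoreFile("a90eba00562846dd971f6f809abf77ea", 21142),
                    new StoreFile("aa5dac8b3db34e478df78614250e2faa", 17906));
            int minFilesToCompact = 3; // assumed threshold for this sketch
            if (eligible.size() >= minFilesToCompact) {
                long total = totalSize(eligible);
                // 27778 + 21142 + 17906 = 66826 bytes ~= 65.3 K, matching the
                // "selected 3 files of size 66826" and "totalSize=65.3 K" lines.
                System.out.printf("compact %d files, totalSize=%.1f K%n",
                        eligible.size(), total / 1024.0);
            }
        }
    }

Running it prints "compact 3 files, totalSize=65.3 K", agreeing with the figures logged above.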
2024-11-20T23:40:24,555 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:24,555 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035., storeName=0d891293336a03b0c41247dd541de035/info, priority=13, startTime=1732146024516; duration=0sec 2024-11-20T23:40:24,555 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:24,555 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d891293336a03b0c41247dd541de035:info 2024-11-20T23:40:25,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:40:25,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:26,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:26,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:40:26,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:26,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-20T23:40:26,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/d5d52cd6fb0e46868bb9ab12bf3af8ec is 1080, key is row0122/info:/1732146024517/Put/seqid=0 2024-11-20T23:40:26,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741860_1036 (size=13594) 2024-11-20T23:40:26,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741860_1036 (size=13594) 2024-11-20T23:40:26,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/d5d52cd6fb0e46868bb9ab12bf3af8ec 2024-11-20T23:40:26,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/d5d52cd6fb0e46868bb9ab12bf3af8ec as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/d5d52cd6fb0e46868bb9ab12bf3af8ec 2024-11-20T23:40:26,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/d5d52cd6fb0e46868bb9ab12bf3af8ec, entries=8, sequenceid=171, filesize=13.3 K 2024-11-20T23:40:26,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=10.51 KB/10760 for 0d891293336a03b0c41247dd541de035 in 22ms, sequenceid=171, compaction requested=true 2024-11-20T23:40:26,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:26,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d891293336a03b0c41247dd541de035:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T23:40:26,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:26,558 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:40:26,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:26,558 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-20T23:40:26,561 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 88512 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T23:40:26,561 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1541): 0d891293336a03b0c41247dd541de035/info is initiating minor compaction (all files) 2024-11-20T23:40:26,561 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0d891293336a03b0c41247dd541de035/info in TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:26,561 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/ea226fe4bc70407681e4c9fcc2d05f28, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/92b713893da440e099582584d3671531, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/d5d52cd6fb0e46868bb9ab12bf3af8ec] into tmpdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp, totalSize=86.4 K 2024-11-20T23:40:26,562 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting ea226fe4bc70407681e4c9fcc2d05f28, keycount=48, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732146000185 2024-11-20T23:40:26,562 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 92b713893da440e099582584d3671531, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732146024490 2024-11-20T23:40:26,562 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting d5d52cd6fb0e46868bb9ab12bf3af8ec, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732146024517 2024-11-20T23:40:26,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/e4485c412244485d88db2d01b602757a is 1080, key is row0130/info:/1732146026536/Put/seqid=0 2024-11-20T23:40:26,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741861_1037 (size=16828) 2024-11-20T23:40:26,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741861_1037 (size=16828) 2024-11-20T23:40:26,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=185 (bloomFilter=true), 
to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/e4485c412244485d88db2d01b602757a 2024-11-20T23:40:26,580 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d891293336a03b0c41247dd541de035#info#compaction#77 average throughput is 23.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:40:26,580 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/18db0ca16b8841d68d2d81a43ad926a8 is 1080, key is row0062/info:/1732146000185/Put/seqid=0 2024-11-20T23:40:26,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/e4485c412244485d88db2d01b602757a as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/e4485c412244485d88db2d01b602757a 2024-11-20T23:40:26,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741862_1038 (size=78811) 2024-11-20T23:40:26,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741862_1038 (size=78811) 2024-11-20T23:40:26,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/e4485c412244485d88db2d01b602757a, entries=11, sequenceid=185, filesize=16.4 K 2024-11-20T23:40:26,610 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/18db0ca16b8841d68d2d81a43ad926a8 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/18db0ca16b8841d68d2d81a43ad926a8 2024-11-20T23:40:26,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=14.71 KB/15064 for 0d891293336a03b0c41247dd541de035 in 52ms, sequenceid=185, compaction requested=false 2024-11-20T23:40:26,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:26,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:26,613 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-20T23:40:26,618 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 
0d891293336a03b0c41247dd541de035/info of 0d891293336a03b0c41247dd541de035 into 18db0ca16b8841d68d2d81a43ad926a8(size=77.0 K), total size for store is 93.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T23:40:26,618 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:26,618 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035., storeName=0d891293336a03b0c41247dd541de035/info, priority=13, startTime=1732146026558; duration=0sec 2024-11-20T23:40:26,618 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:26,618 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d891293336a03b0c41247dd541de035:info 2024-11-20T23:40:26,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/eeba59b794a1440f8c07597877fb407a is 1080, key is row0141/info:/1732146026559/Put/seqid=0 2024-11-20T23:40:26,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741863_1039 (size=21156) 2024-11-20T23:40:26,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741863_1039 (size=21156) 2024-11-20T23:40:26,629 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/eeba59b794a1440f8c07597877fb407a 2024-11-20T23:40:26,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/eeba59b794a1440f8c07597877fb407a as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/eeba59b794a1440f8c07597877fb407a 2024-11-20T23:40:26,642 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/eeba59b794a1440f8c07597877fb407a, entries=15, sequenceid=203, filesize=20.7 K 2024-11-20T23:40:26,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=5.25 KB/5380 for 0d891293336a03b0c41247dd541de035 in 31ms, sequenceid=203, compaction requested=true 2024-11-20T23:40:26,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:26,644 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 0d891293336a03b0c41247dd541de035:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T23:40:26,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:26,644 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:40:26,645 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 116795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T23:40:26,645 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1541): 0d891293336a03b0c41247dd541de035/info is initiating minor compaction (all files) 2024-11-20T23:40:26,646 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0d891293336a03b0c41247dd541de035/info in TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:26,646 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/18db0ca16b8841d68d2d81a43ad926a8, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/e4485c412244485d88db2d01b602757a, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/eeba59b794a1440f8c07597877fb407a] into tmpdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp, totalSize=114.1 K 2024-11-20T23:40:26,646 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 18db0ca16b8841d68d2d81a43ad926a8, keycount=68, bloomtype=ROW, size=77.0 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732146000185 2024-11-20T23:40:26,646 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting e4485c412244485d88db2d01b602757a, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732146026536 2024-11-20T23:40:26,647 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting eeba59b794a1440f8c07597877fb407a, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732146026559 2024-11-20T23:40:26,661 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d891293336a03b0c41247dd541de035#info#compaction#79 average throughput is 32.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:40:26,661 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/753e6bc789c84311b2ff172d3254a824 is 1080, key is row0062/info:/1732146000185/Put/seqid=0 2024-11-20T23:40:26,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741864_1040 (size=106949) 2024-11-20T23:40:26,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741864_1040 (size=106949) 2024-11-20T23:40:26,672 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/753e6bc789c84311b2ff172d3254a824 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/753e6bc789c84311b2ff172d3254a824 2024-11-20T23:40:26,679 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0d891293336a03b0c41247dd541de035/info of 0d891293336a03b0c41247dd541de035 into 753e6bc789c84311b2ff172d3254a824(size=104.4 K), total size for store is 104.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T23:40:26,680 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:26,680 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035., storeName=0d891293336a03b0c41247dd541de035/info, priority=13, startTime=1732146026644; duration=0sec 2024-11-20T23:40:26,680 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:26,680 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d891293336a03b0c41247dd541de035:info 2024-11-20T23:40:27,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:27,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:27,893 INFO [master/412a5e44fd2e:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T23:40:27,893 INFO [master/412a5e44fd2e:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T23:40:28,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:28,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:40:28,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:28,628 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T23:40:28,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/96447f1217844441be3fa74bda682a9a is 1080, key is row0156/info:/1732146026615/Put/seqid=0 2024-11-20T23:40:28,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741865_1041 (size=12516) 2024-11-20T23:40:28,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741865_1041 (size=12516) 2024-11-20T23:40:28,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/96447f1217844441be3fa74bda682a9a 2024-11-20T23:40:28,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/96447f1217844441be3fa74bda682a9a as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/96447f1217844441be3fa74bda682a9a 2024-11-20T23:40:28,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/96447f1217844441be3fa74bda682a9a, entries=7, sequenceid=215, filesize=12.2 K 2024-11-20T23:40:28,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 0d891293336a03b0c41247dd541de035 in 26ms, sequenceid=215, compaction requested=false 2024-11-20T23:40:28,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:28,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:28,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-20T23:40:28,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/761e49ff1c754210aa2b05cf6514e99f is 1080, key is row0163/info:/1732146028630/Put/seqid=0 2024-11-20T23:40:28,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to 
blk_1073741866_1042 (size=19000) 2024-11-20T23:40:28,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/761e49ff1c754210aa2b05cf6514e99f 2024-11-20T23:40:28,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741866_1042 (size=19000) 2024-11-20T23:40:28,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/761e49ff1c754210aa2b05cf6514e99f as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/761e49ff1c754210aa2b05cf6514e99f 2024-11-20T23:40:28,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/761e49ff1c754210aa2b05cf6514e99f, entries=13, sequenceid=231, filesize=18.6 K 2024-11-20T23:40:28,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 0d891293336a03b0c41247dd541de035 in 31ms, sequenceid=231, compaction requested=true 2024-11-20T23:40:28,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:28,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d891293336a03b0c41247dd541de035:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T23:40:28,686 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:40:28,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:28,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:28,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-20T23:40:28,688 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 138465 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T23:40:28,688 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1541): 0d891293336a03b0c41247dd541de035/info is initiating minor compaction (all files) 2024-11-20T23:40:28,688 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0d891293336a03b0c41247dd541de035/info in TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 
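The flush and compaction records around this point follow the usual cycle: MemStoreFlusher writes the memstore out as a new HFile, and the short-compactions thread then selects the accumulated files for a minor compaction. A minimal sketch of driving the same cycle by hand through the public HBase Admin API, assuming only the table name taken from the log and a default client configuration (illustrative code, not part of the test above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompactSketch {
      public static void main(String[] args) throws Exception {
        // Reads hbase-site.xml from the classpath; cluster location is assumed, not from the log.
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.flush(table);   // force a memstore flush, like the MemStoreFlusher entries above
          admin.compact(table); // request a minor compaction, like the shortCompactions entries
        }
      }
    }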
2024-11-20T23:40:28,688 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/753e6bc789c84311b2ff172d3254a824, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/96447f1217844441be3fa74bda682a9a, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/761e49ff1c754210aa2b05cf6514e99f] into tmpdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp, totalSize=135.2 K 2024-11-20T23:40:28,688 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 753e6bc789c84311b2ff172d3254a824, keycount=94, bloomtype=ROW, size=104.4 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732146000185 2024-11-20T23:40:28,689 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 96447f1217844441be3fa74bda682a9a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732146026615 2024-11-20T23:40:28,689 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 761e49ff1c754210aa2b05cf6514e99f, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1732146028630 2024-11-20T23:40:28,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/6981dd045b5b49308d2c5f6ace26ae12 is 1080, key is row0176/info:/1732146028657/Put/seqid=0 2024-11-20T23:40:28,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741867_1043 (size=19000) 2024-11-20T23:40:28,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741867_1043 (size=19000) 2024-11-20T23:40:28,699 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/6981dd045b5b49308d2c5f6ace26ae12 2024-11-20T23:40:28,703 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d891293336a03b0c41247dd541de035#info#compaction#83 average throughput is 38.99 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:40:28,704 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/607f5ef1e8eb4e03841f7adc5915d3bc is 1080, key is row0062/info:/1732146000185/Put/seqid=0 2024-11-20T23:40:28,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/6981dd045b5b49308d2c5f6ace26ae12 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/6981dd045b5b49308d2c5f6ace26ae12 2024-11-20T23:40:28,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741868_1044 (size=128743) 2024-11-20T23:40:28,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741868_1044 (size=128743) 2024-11-20T23:40:28,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/6981dd045b5b49308d2c5f6ace26ae12, entries=13, sequenceid=247, filesize=18.6 K 2024-11-20T23:40:28,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for 0d891293336a03b0c41247dd541de035 in 23ms, sequenceid=247, compaction requested=false 2024-11-20T23:40:28,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:28,711 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/607f5ef1e8eb4e03841f7adc5915d3bc as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/607f5ef1e8eb4e03841f7adc5915d3bc 2024-11-20T23:40:28,718 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0d891293336a03b0c41247dd541de035/info of 0d891293336a03b0c41247dd541de035 into 607f5ef1e8eb4e03841f7adc5915d3bc(size=125.7 K), total size for store is 144.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
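The Close-WAL-Writer-0 warnings that recur roughly once per second throughout this section all bottom out in the same cause: RecoverLeaseFSUtils.isFileClosed() is invoked against a DFSClient that has already been shut down, so DFSClient.checkOpen() throws java.io.IOException: Filesystem closed. A minimal sketch of just that failure mode, assuming an arbitrary HDFS address and hadoop-hdfs-client on the classpath (illustrative only, not the test's code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClosedFsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:8020"); // assumed address, not from the log
        FileSystem fs = FileSystem.get(conf);
        fs.close(); // closes the underlying DFSClient
        // Any further call reaches DFSClient.checkOpen and fails with
        // java.io.IOException: Filesystem closed
        fs.exists(new Path("/tmp/anything"));
      }
    }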
2024-11-20T23:40:28,718 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:28,718 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035., storeName=0d891293336a03b0c41247dd541de035/info, priority=13, startTime=1732146028686; duration=0sec 2024-11-20T23:40:28,718 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:28,718 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d891293336a03b0c41247dd541de035:info 2024-11-20T23:40:29,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:40:29,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:30,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:30,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:40:30,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:30,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T23:40:30,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/83d5854640bf4246996e14056386749a is 1080, key is row0189/info:/1732146028689/Put/seqid=0 2024-11-20T23:40:30,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741869_1045 (size=12518) 2024-11-20T23:40:30,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741869_1045 (size=12518) 2024-11-20T23:40:30,715 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/83d5854640bf4246996e14056386749a 2024-11-20T23:40:30,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/83d5854640bf4246996e14056386749a as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/83d5854640bf4246996e14056386749a 2024-11-20T23:40:30,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/83d5854640bf4246996e14056386749a, entries=7, sequenceid=258, filesize=12.2 K 2024-11-20T23:40:30,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 0d891293336a03b0c41247dd541de035 in 24ms, sequenceid=258, compaction requested=true 2024-11-20T23:40:30,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:30,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d891293336a03b0c41247dd541de035:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T23:40:30,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:30,729 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:40:30,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:30,730 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-20T23:40:30,730 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 160261 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T23:40:30,730 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1541): 0d891293336a03b0c41247dd541de035/info is initiating minor compaction (all files) 2024-11-20T23:40:30,730 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0d891293336a03b0c41247dd541de035/info in TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:30,730 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/607f5ef1e8eb4e03841f7adc5915d3bc, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/6981dd045b5b49308d2c5f6ace26ae12, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/83d5854640bf4246996e14056386749a] into tmpdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp, totalSize=156.5 K 2024-11-20T23:40:30,730 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 607f5ef1e8eb4e03841f7adc5915d3bc, keycount=114, bloomtype=ROW, size=125.7 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1732146000185 2024-11-20T23:40:30,731 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6981dd045b5b49308d2c5f6ace26ae12, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732146028657 2024-11-20T23:40:30,731 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 83d5854640bf4246996e14056386749a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1732146028689 2024-11-20T23:40:30,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/5bdf2530aa1d4219850d5b66a7418793 is 1080, key is row0196/info:/1732146030705/Put/seqid=0 2024-11-20T23:40:30,747 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d891293336a03b0c41247dd541de035#info#compaction#86 average throughput is 68.75 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:40:30,748 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/8bb4221e77e449f9b05556bdd40564e8 is 1080, key is row0062/info:/1732146000185/Put/seqid=0 2024-11-20T23:40:30,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741870_1046 (size=16839) 2024-11-20T23:40:30,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741870_1046 (size=16839) 2024-11-20T23:40:30,753 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/5bdf2530aa1d4219850d5b66a7418793 2024-11-20T23:40:30,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741871_1047 (size=150480) 2024-11-20T23:40:30,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741871_1047 (size=150480) 2024-11-20T23:40:30,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/5bdf2530aa1d4219850d5b66a7418793 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/5bdf2530aa1d4219850d5b66a7418793 2024-11-20T23:40:30,758 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/8bb4221e77e449f9b05556bdd40564e8 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/8bb4221e77e449f9b05556bdd40564e8 2024-11-20T23:40:30,763 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/5bdf2530aa1d4219850d5b66a7418793, entries=11, sequenceid=272, filesize=16.4 K 2024-11-20T23:40:30,764 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=14.71 KB/15064 for 0d891293336a03b0c41247dd541de035 in 35ms, sequenceid=272, compaction requested=false 2024-11-20T23:40:30,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:30,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:30,764 INFO 
[RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0d891293336a03b0c41247dd541de035/info of 0d891293336a03b0c41247dd541de035 into 8bb4221e77e449f9b05556bdd40564e8(size=147.0 K), total size for store is 163.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T23:40:30,764 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:30,764 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035., storeName=0d891293336a03b0c41247dd541de035/info, priority=13, startTime=1732146030728; duration=0sec 2024-11-20T23:40:30,764 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-20T23:40:30,764 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:30,764 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d891293336a03b0c41247dd541de035:info 2024-11-20T23:40:30,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/0367d6ac66f14480b267e5faa3038b56 is 1080, key is row0207/info:/1732146030731/Put/seqid=0 2024-11-20T23:40:30,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741872_1048 (size=21171) 2024-11-20T23:40:30,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741872_1048 (size=21171) 2024-11-20T23:40:30,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/0367d6ac66f14480b267e5faa3038b56 2024-11-20T23:40:30,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/0367d6ac66f14480b267e5faa3038b56 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/0367d6ac66f14480b267e5faa3038b56 2024-11-20T23:40:30,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/0367d6ac66f14480b267e5faa3038b56, entries=15, sequenceid=291, filesize=20.7 K 2024-11-20T23:40:30,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=3.15 KB/3228 
for 0d891293336a03b0c41247dd541de035 in 30ms, sequenceid=291, compaction requested=true 2024-11-20T23:40:30,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:30,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d891293336a03b0c41247dd541de035:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T23:40:30,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:30,794 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:40:30,796 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 188490 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T23:40:30,796 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1541): 0d891293336a03b0c41247dd541de035/info is initiating minor compaction (all files) 2024-11-20T23:40:30,796 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0d891293336a03b0c41247dd541de035/info in TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:30,796 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/8bb4221e77e449f9b05556bdd40564e8, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/5bdf2530aa1d4219850d5b66a7418793, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/0367d6ac66f14480b267e5faa3038b56] into tmpdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp, totalSize=184.1 K 2024-11-20T23:40:30,796 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8bb4221e77e449f9b05556bdd40564e8, keycount=134, bloomtype=ROW, size=147.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1732146000185 2024-11-20T23:40:30,797 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5bdf2530aa1d4219850d5b66a7418793, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732146030705 2024-11-20T23:40:30,797 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0367d6ac66f14480b267e5faa3038b56, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732146030731 2024-11-20T23:40:30,811 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d891293336a03b0c41247dd541de035#info#compaction#88 average throughput is 54.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:40:30,811 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/bd7bbb7b33eb4075b4c68a6403228d1c is 1080, key is row0062/info:/1732146000185/Put/seqid=0 2024-11-20T23:40:30,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741873_1049 (size=178644) 2024-11-20T23:40:30,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741873_1049 (size=178644) 2024-11-20T23:40:30,825 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/bd7bbb7b33eb4075b4c68a6403228d1c as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/bd7bbb7b33eb4075b4c68a6403228d1c 2024-11-20T23:40:30,832 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0d891293336a03b0c41247dd541de035/info of 0d891293336a03b0c41247dd541de035 into bd7bbb7b33eb4075b4c68a6403228d1c(size=174.5 K), total size for store is 174.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T23:40:30,832 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:30,832 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035., storeName=0d891293336a03b0c41247dd541de035/info, priority=13, startTime=1732146030794; duration=0sec 2024-11-20T23:40:30,832 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:30,832 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d891293336a03b0c41247dd541de035:info 2024-11-20T23:40:31,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:31,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:32,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:32,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:32,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:32,779 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T23:40:32,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/119f9a525620439aa4bb13621e55a58d is 1080, key is row0222/info:/1732146030766/Put/seqid=0 2024-11-20T23:40:32,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741874_1050 (size=12523) 2024-11-20T23:40:32,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741874_1050 (size=12523) 2024-11-20T23:40:32,788 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/119f9a525620439aa4bb13621e55a58d 2024-11-20T23:40:32,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/119f9a525620439aa4bb13621e55a58d as 
hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/119f9a525620439aa4bb13621e55a58d 2024-11-20T23:40:32,800 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-20T23:40:32,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/119f9a525620439aa4bb13621e55a58d, entries=7, sequenceid=302, filesize=12.2 K 2024-11-20T23:40:32,802 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 0d891293336a03b0c41247dd541de035 in 23ms, sequenceid=302, compaction requested=false 2024-11-20T23:40:32,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:32,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37437 {}] regionserver.HRegion(8855): Flush requested on 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:32,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-20T23:40:32,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/abf0477033084415b727d55b81cd4e0e is 1080, key is row0229/info:/1732146032780/Put/seqid=0 2024-11-20T23:40:32,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741875_1051 (size=17918) 2024-11-20T23:40:32,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741875_1051 (size=17918) 2024-11-20T23:40:32,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/abf0477033084415b727d55b81cd4e0e 2024-11-20T23:40:32,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/abf0477033084415b727d55b81cd4e0e as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/abf0477033084415b727d55b81cd4e0e 2024-11-20T23:40:32,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/abf0477033084415b727d55b81cd4e0e, entries=12, sequenceid=317, filesize=17.5 K 2024-11-20T23:40:32,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 
KB/14064, currentSize=16.81 KB/17216 for 0d891293336a03b0c41247dd541de035 in 32ms, sequenceid=317, compaction requested=true 2024-11-20T23:40:32,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:32,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d891293336a03b0c41247dd541de035:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T23:40:32,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:32,836 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T23:40:32,837 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 209085 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T23:40:32,837 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1541): 0d891293336a03b0c41247dd541de035/info is initiating minor compaction (all files) 2024-11-20T23:40:32,838 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0d891293336a03b0c41247dd541de035/info in TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:32,838 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/bd7bbb7b33eb4075b4c68a6403228d1c, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/119f9a525620439aa4bb13621e55a58d, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/abf0477033084415b727d55b81cd4e0e] into tmpdir=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp, totalSize=204.2 K 2024-11-20T23:40:32,838 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting bd7bbb7b33eb4075b4c68a6403228d1c, keycount=160, bloomtype=ROW, size=174.5 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732146000185 2024-11-20T23:40:32,838 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 119f9a525620439aa4bb13621e55a58d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732146030766 2024-11-20T23:40:32,839 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] compactions.Compactor(225): Compacting abf0477033084415b727d55b81cd4e0e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732146032780 2024-11-20T23:40:32,851 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d891293336a03b0c41247dd541de035#info#compaction#91 average throughput is 45.92 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T23:40:32,852 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/c979f14ec3af4eae9c5de4053d7a38b1 is 1080, key is row0062/info:/1732146000185/Put/seqid=0 2024-11-20T23:40:32,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741876_1052 (size=199251) 2024-11-20T23:40:32,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741876_1052 (size=199251) 2024-11-20T23:40:32,859 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/c979f14ec3af4eae9c5de4053d7a38b1 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/c979f14ec3af4eae9c5de4053d7a38b1 2024-11-20T23:40:32,865 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0d891293336a03b0c41247dd541de035/info of 0d891293336a03b0c41247dd541de035 into c979f14ec3af4eae9c5de4053d7a38b1(size=194.6 K), total size for store is 194.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T23:40:32,865 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:32,865 INFO [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035., storeName=0d891293336a03b0c41247dd541de035/info, priority=13, startTime=1732146032836; duration=0sec 2024-11-20T23:40:32,865 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T23:40:32,865 DEBUG [RS:0;412a5e44fd2e:37437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d891293336a03b0c41247dd541de035:info 2024-11-20T23:40:33,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:33,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:34,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:34,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:34,837 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-20T23:40:34,837 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C37437%2C1732145986811.1732146034837 2024-11-20T23:40:34,873 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:34,873 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:34,873 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:34,873 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:34,873 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:34,874 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/WALs/412a5e44fd2e,37437,1732145986811/412a5e44fd2e%2C37437%2C1732145986811.1732145987434 with entries=314, filesize=308.87 KB; new WAL /user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/WALs/412a5e44fd2e,37437,1732145986811/412a5e44fd2e%2C37437%2C1732145986811.1732146034837 2024-11-20T23:40:34,875 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43117:43117),(127.0.0.1/127.0.0.1:37713:37713)] 2024-11-20T23:40:34,875 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/WALs/412a5e44fd2e,37437,1732145986811/412a5e44fd2e%2C37437%2C1732145986811.1732145987434 is not closed yet, will try archiving it next time 2024-11-20T23:40:34,879 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 0d891293336a03b0c41247dd541de035 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-20T23:40:34,881 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741833_1009 (size=316287) 2024-11-20T23:40:34,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741833_1009 (size=316287) 2024-11-20T23:40:34,886 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/56c153228a644fdface0a966c098781d is 1080, key is row0241/info:/1732146032805/Put/seqid=0 2024-11-20T23:40:34,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741878_1054 (size=22254) 2024-11-20T23:40:34,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741878_1054 (size=22254) 2024-11-20T23:40:35,296 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/56c153228a644fdface0a966c098781d 2024-11-20T23:40:35,303 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/.tmp/info/56c153228a644fdface0a966c098781d as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/56c153228a644fdface0a966c098781d 2024-11-20T23:40:35,310 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/56c153228a644fdface0a966c098781d, entries=16, sequenceid=337, filesize=21.7 K 2024-11-20T23:40:35,311 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=0 B/0 for 0d891293336a03b0c41247dd541de035 in 432ms, sequenceid=337, compaction requested=false 2024-11-20T23:40:35,311 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 0d891293336a03b0c41247dd541de035: 2024-11-20T23:40:35,311 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 49ae1279d535b3aafde3aebf3e15e8c0: 2024-11-20T23:40:35,311 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-20T23:40:35,315 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/.tmp/info/6cf6bc6c8c4c4466b6d785fa21beb898 is 186, key is TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0./info:regioninfo/1732146011482/Put/seqid=0 2024-11-20T23:40:35,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741879_1055 (size=6153) 2024-11-20T23:40:35,319 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741879_1055 (size=6153) 2024-11-20T23:40:35,320 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/.tmp/info/6cf6bc6c8c4c4466b6d785fa21beb898 2024-11-20T23:40:35,325 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/.tmp/info/6cf6bc6c8c4c4466b6d785fa21beb898 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/info/6cf6bc6c8c4c4466b6d785fa21beb898 2024-11-20T23:40:35,330 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/info/6cf6bc6c8c4c4466b6d785fa21beb898, entries=5, sequenceid=21, filesize=6.0 K 2024-11-20T23:40:35,331 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 20ms, sequenceid=21, compaction requested=false 2024-11-20T23:40:35,331 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-20T23:40:35,331 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C37437%2C1732145986811.1732146035331 2024-11-20T23:40:35,335 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,335 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,335 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,336 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,336 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,336 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/WALs/412a5e44fd2e,37437,1732145986811/412a5e44fd2e%2C37437%2C1732145986811.1732146034837 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/WALs/412a5e44fd2e,37437,1732145986811/412a5e44fd2e%2C37437%2C1732145986811.1732146035331 2024-11-20T23:40:35,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741877_1053 (size=731) 2024-11-20T23:40:35,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741877_1053 (size=731) 2024-11-20T23:40:35,340 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/WALs/412a5e44fd2e,37437,1732145986811/412a5e44fd2e%2C37437%2C1732145986811.1732145987434 to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/oldWALs/412a5e44fd2e%2C37437%2C1732145986811.1732145987434 2024-11-20T23:40:35,341 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37713:37713),(127.0.0.1/127.0.0.1:43117:43117)] 2024-11-20T23:40:35,341 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 
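The roll and flush records in the stretch above report the same data twice: as raw byte counts in the block reports (for example blk_1073741833_1009 with size=316287) and as human-readable figures in the roll and flush messages (filesize=308.87 KB, filesize=6.0 K, filesize=723 B). A minimal sketch of producing such strings by repeatedly dividing by 1024 follows; it assumes nothing beyond the JDK and is an illustrative helper written for this note, not the formatter HBase itself uses.

// HumanBytes: illustrative helper, not HBase's own size formatter.
// Prints a byte count the way the log above reads: raw bytes below 1 KiB,
// otherwise the value divided by 1024 per unit step, with two decimals.
public final class HumanBytes {

  private static final String[] UNITS = {"B", "KB", "MB", "GB", "TB"};

  public static String format(long bytes) {
    double value = bytes;
    int unit = 0;
    while (value >= 1024 && unit < UNITS.length - 1) {
      value /= 1024;
      unit++;
    }
    // Whole bytes print as-is, e.g. "723 B"; larger values get two decimals.
    return unit == 0 ? bytes + " B" : String.format("%.2f %s", value, UNITS[unit]);
  }

  public static void main(String[] args) {
    System.out.println(format(316287L)); // prints about "308.87 KB"
    System.out.println(format(723L));    // prints "723 B"
  }
}

In base-1024 units, 316287 bytes divided by 1024 is about 308.87, the same KB figure the first roll message above reports for the old WAL.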
2024-11-20T23:40:35,341 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/WALs/412a5e44fd2e,37437,1732145986811/412a5e44fd2e%2C37437%2C1732145986811.1732146034837 to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/oldWALs/412a5e44fd2e%2C37437%2C1732145986811.1732146034837 2024-11-20T23:40:35,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T23:40:35,342 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T23:40:35,342 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:40:35,342 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:40:35,342 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:40:35,342 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T23:40:35,342 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T23:40:35,342 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1470619152, stopped=false 2024-11-20T23:40:35,342 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=412a5e44fd2e,43531,1732145986638 2024-11-20T23:40:35,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:40:35,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:40:35,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:35,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:35,351 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:40:35,351 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
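The call stacks above and below both originate in the test's @After hook: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection and then stops the single-process cluster. A stripped-down, hypothetical harness with that lifecycle (class name is illustrative; the real test is AbstractTestLogRolling):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Starts mini DFS, ZooKeeper, one master and one region server.
        util.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Produces the sequence logged here: close the async cluster connection,
        // then JVMClusterUtil stops the master and region server, then HDFS.
        util.shutdownMiniCluster();
      }
    }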
2024-11-20T23:40:35,351 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:40:35,352 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:40:35,352 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:40:35,352 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '412a5e44fd2e,37437,1732145986811' ***** 2024-11-20T23:40:35,352 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T23:40:35,352 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:40:35,352 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T23:40:35,352 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T23:40:35,352 INFO [RS:0;412a5e44fd2e:37437 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T23:40:35,352 INFO [RS:0;412a5e44fd2e:37437 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T23:40:35,353 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(3091): Received CLOSE for 0d891293336a03b0c41247dd541de035 2024-11-20T23:40:35,353 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(3091): Received CLOSE for 49ae1279d535b3aafde3aebf3e15e8c0 2024-11-20T23:40:35,353 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(959): stopping server 412a5e44fd2e,37437,1732145986811 2024-11-20T23:40:35,353 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:40:35,353 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0d891293336a03b0c41247dd541de035, disabling compactions & flushes 2024-11-20T23:40:35,353 INFO [RS:0;412a5e44fd2e:37437 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;412a5e44fd2e:37437. 2024-11-20T23:40:35,353 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 
2024-11-20T23:40:35,353 DEBUG [RS:0;412a5e44fd2e:37437 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:40:35,353 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:35,353 DEBUG [RS:0;412a5e44fd2e:37437 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:40:35,353 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. after waiting 0 ms 2024-11-20T23:40:35,353 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:35,353 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T23:40:35,353 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T23:40:35,353 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-20T23:40:35,353 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T23:40:35,353 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-20T23:40:35,353 DEBUG [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(1325): Online Regions={0d891293336a03b0c41247dd541de035=TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035., 49ae1279d535b3aafde3aebf3e15e8c0=TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0., 1588230740=hbase:meta,,1.1588230740} 2024-11-20T23:40:35,353 DEBUG [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(1351): Waiting on 0d891293336a03b0c41247dd541de035, 1588230740, 49ae1279d535b3aafde3aebf3e15e8c0 2024-11-20T23:40:35,353 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T23:40:35,353 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:40:35,353 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:40:35,353 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T23:40:35,353 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:40:35,353 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e->hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/7a797067eba246aa82883b6778da828e-top, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/4af20445003746b692921f8dbf152a1d, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/fd973901f0e1460dae23cbdb99abcfdb, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/d730df1a49ff4d34852de2ee49dca4aa, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/07506591607c4b15888c0ca29a7d5a2d, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/a90eba00562846dd971f6f809abf77ea, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/ea226fe4bc70407681e4c9fcc2d05f28, 
hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/aa5dac8b3db34e478df78614250e2faa, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/92b713893da440e099582584d3671531, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/18db0ca16b8841d68d2d81a43ad926a8, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/d5d52cd6fb0e46868bb9ab12bf3af8ec, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/e4485c412244485d88db2d01b602757a, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/753e6bc789c84311b2ff172d3254a824, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/eeba59b794a1440f8c07597877fb407a, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/96447f1217844441be3fa74bda682a9a, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/607f5ef1e8eb4e03841f7adc5915d3bc, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/761e49ff1c754210aa2b05cf6514e99f, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/6981dd045b5b49308d2c5f6ace26ae12, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/8bb4221e77e449f9b05556bdd40564e8, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/83d5854640bf4246996e14056386749a, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/5bdf2530aa1d4219850d5b66a7418793, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/bd7bbb7b33eb4075b4c68a6403228d1c, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/0367d6ac66f14480b267e5faa3038b56, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/119f9a525620439aa4bb13621e55a58d, 
hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/abf0477033084415b727d55b81cd4e0e] to archive 2024-11-20T23:40:35,355 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T23:40:35,357 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:40:35,358 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/4af20445003746b692921f8dbf152a1d to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/4af20445003746b692921f8dbf152a1d 2024-11-20T23:40:35,360 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/fd973901f0e1460dae23cbdb99abcfdb to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/fd973901f0e1460dae23cbdb99abcfdb 2024-11-20T23:40:35,361 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/d730df1a49ff4d34852de2ee49dca4aa to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/d730df1a49ff4d34852de2ee49dca4aa 2024-11-20T23:40:35,363 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/07506591607c4b15888c0ca29a7d5a2d to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/07506591607c4b15888c0ca29a7d5a2d 2024-11-20T23:40:35,363 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 
{event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-20T23:40:35,364 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:40:35,364 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T23:40:35,364 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732146035353Running coprocessor pre-close hooks at 1732146035353Disabling compacts and flushes for region at 1732146035353Disabling writes for close at 1732146035353Writing region close event to WAL at 1732146035359 (+6 ms)Running coprocessor post-close hooks at 1732146035364 (+5 ms)Closed at 1732146035364 2024-11-20T23:40:35,364 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T23:40:35,364 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/a90eba00562846dd971f6f809abf77ea to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/a90eba00562846dd971f6f809abf77ea 2024-11-20T23:40:35,365 INFO [regionserver/412a5e44fd2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T23:40:35,366 INFO [regionserver/412a5e44fd2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T23:40:35,366 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/ea226fe4bc70407681e4c9fcc2d05f28 to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/ea226fe4bc70407681e4c9fcc2d05f28 2024-11-20T23:40:35,367 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/aa5dac8b3db34e478df78614250e2faa to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/aa5dac8b3db34e478df78614250e2faa 2024-11-20T23:40:35,369 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/92b713893da440e099582584d3671531 to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/92b713893da440e099582584d3671531 2024-11-20T23:40:35,370 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/18db0ca16b8841d68d2d81a43ad926a8 to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/18db0ca16b8841d68d2d81a43ad926a8 2024-11-20T23:40:35,371 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/d5d52cd6fb0e46868bb9ab12bf3af8ec to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/d5d52cd6fb0e46868bb9ab12bf3af8ec 2024-11-20T23:40:35,372 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/e4485c412244485d88db2d01b602757a to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/e4485c412244485d88db2d01b602757a 2024-11-20T23:40:35,373 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/753e6bc789c84311b2ff172d3254a824 to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/753e6bc789c84311b2ff172d3254a824 2024-11-20T23:40:35,373 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/eeba59b794a1440f8c07597877fb407a to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/eeba59b794a1440f8c07597877fb407a 2024-11-20T23:40:35,374 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/96447f1217844441be3fa74bda682a9a to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/96447f1217844441be3fa74bda682a9a 2024-11-20T23:40:35,375 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/607f5ef1e8eb4e03841f7adc5915d3bc to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/607f5ef1e8eb4e03841f7adc5915d3bc 2024-11-20T23:40:35,376 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/761e49ff1c754210aa2b05cf6514e99f to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/761e49ff1c754210aa2b05cf6514e99f 2024-11-20T23:40:35,378 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/6981dd045b5b49308d2c5f6ace26ae12 to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/6981dd045b5b49308d2c5f6ace26ae12 2024-11-20T23:40:35,379 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/8bb4221e77e449f9b05556bdd40564e8 to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/8bb4221e77e449f9b05556bdd40564e8 2024-11-20T23:40:35,380 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/83d5854640bf4246996e14056386749a to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/83d5854640bf4246996e14056386749a 2024-11-20T23:40:35,381 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/5bdf2530aa1d4219850d5b66a7418793 to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/5bdf2530aa1d4219850d5b66a7418793 2024-11-20T23:40:35,383 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/bd7bbb7b33eb4075b4c68a6403228d1c to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/bd7bbb7b33eb4075b4c68a6403228d1c 2024-11-20T23:40:35,384 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/0367d6ac66f14480b267e5faa3038b56 to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/0367d6ac66f14480b267e5faa3038b56 2024-11-20T23:40:35,385 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/119f9a525620439aa4bb13621e55a58d to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/119f9a525620439aa4bb13621e55a58d 2024-11-20T23:40:35,386 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/abf0477033084415b727d55b81cd4e0e to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/info/abf0477033084415b727d55b81cd4e0e 2024-11-20T23:40:35,386 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=412a5e44fd2e:43531 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-20T23:40:35,387 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [4af20445003746b692921f8dbf152a1d=8260, fd973901f0e1460dae23cbdb99abcfdb=12509, d730df1a49ff4d34852de2ee49dca4aa=27778, 07506591607c4b15888c0ca29a7d5a2d=16817, a90eba00562846dd971f6f809abf77ea=21142, ea226fe4bc70407681e4c9fcc2d05f28=57012, aa5dac8b3db34e478df78614250e2faa=17906, 92b713893da440e099582584d3671531=17906, 18db0ca16b8841d68d2d81a43ad926a8=78811, d5d52cd6fb0e46868bb9ab12bf3af8ec=13594, e4485c412244485d88db2d01b602757a=16828, 753e6bc789c84311b2ff172d3254a824=106949, eeba59b794a1440f8c07597877fb407a=21156, 96447f1217844441be3fa74bda682a9a=12516, 607f5ef1e8eb4e03841f7adc5915d3bc=128743, 761e49ff1c754210aa2b05cf6514e99f=19000, 6981dd045b5b49308d2c5f6ace26ae12=19000, 8bb4221e77e449f9b05556bdd40564e8=150480, 83d5854640bf4246996e14056386749a=12518, 5bdf2530aa1d4219850d5b66a7418793=16839, bd7bbb7b33eb4075b4c68a6403228d1c=178644, 0367d6ac66f14480b267e5faa3038b56=21171, 119f9a525620439aa4bb13621e55a58d=12523, abf0477033084415b727d55b81cd4e0e=17918] 2024-11-20T23:40:35,391 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/0d891293336a03b0c41247dd541de035/recovered.edits/340.seqid, newMaxSeqId=340, maxSeqId=85 2024-11-20T23:40:35,391 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:35,391 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0d891293336a03b0c41247dd541de035: Waiting for close lock at 1732146035353Running coprocessor pre-close hooks at 1732146035353Disabling compacts and flushes for region at 1732146035353Disabling writes for close at 1732146035353Writing region close event to WAL at 1732146035387 (+34 ms)Running coprocessor post-close hooks at 1732146035391 (+4 ms)Closed at 1732146035391 2024-11-20T23:40:35,391 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732146010381.0d891293336a03b0c41247dd541de035. 2024-11-20T23:40:35,391 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 49ae1279d535b3aafde3aebf3e15e8c0, disabling compactions & flushes 2024-11-20T23:40:35,391 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0. 2024-11-20T23:40:35,391 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0. 2024-11-20T23:40:35,392 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0. 
after waiting 0 ms 2024-11-20T23:40:35,392 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0. 2024-11-20T23:40:35,392 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0/info/7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e->hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/11bd138fc4c82dfa760e3563dd81a50e/info/7a797067eba246aa82883b6778da828e-bottom] to archive 2024-11-20T23:40:35,393 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T23:40:35,395 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0/info/7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e to hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/archive/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0/info/7a797067eba246aa82883b6778da828e.11bd138fc4c82dfa760e3563dd81a50e 2024-11-20T23:40:35,395 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-20T23:40:35,399 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/data/default/TestLogRolling-testLogRolling/49ae1279d535b3aafde3aebf3e15e8c0/recovered.edits/90.seqid, newMaxSeqId=90, maxSeqId=85 2024-11-20T23:40:35,399 INFO [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0. 2024-11-20T23:40:35,399 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 49ae1279d535b3aafde3aebf3e15e8c0: Waiting for close lock at 1732146035391Running coprocessor pre-close hooks at 1732146035391Disabling compacts and flushes for region at 1732146035391Disabling writes for close at 1732146035392 (+1 ms)Writing region close event to WAL at 1732146035395 (+3 ms)Running coprocessor post-close hooks at 1732146035399 (+4 ms)Closed at 1732146035399 2024-11-20T23:40:35,400 DEBUG [RS_CLOSE_REGION-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732146010381.49ae1279d535b3aafde3aebf3e15e8c0. 
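Both region closes above archive compacted store files instead of deleting them: each HFile under the region's data directory is moved to the same relative location under the cluster-root archive/ directory (the backup.HFileArchiver(596) entries), and the archival is then reported to the master, which fails here only because the RPC client has already been stopped. The path mapping itself is simple; a small illustrative helper, not HBase's own code, where rootDir stands for the .../7e3eefdc-9180-25e6-ec24-8d2e03c66b9e directory seen in the log:

    import org.apache.hadoop.fs.Path;

    // Illustrative only: mirrors the data/... -> archive/data/... mapping in the
    // HFileArchiver entries; HBase's real logic lives in backup.HFileArchiver.
    public final class ArchivePathSketch {
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String root = rootDir.toUri().getPath();
        String file = storeFile.toUri().getPath();
        if (!file.startsWith(root + "/")) {
          throw new IllegalArgumentException("store file is not under the root dir");
        }
        // relative, e.g. data/default/TestLogRolling-testLogRolling/<region>/info/<hfile>
        String relative = file.substring(root.length() + 1);
        return new Path(new Path(rootDir, "archive"), relative);
      }
    }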
2024-11-20T23:40:35,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:35,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:35,553 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(976): stopping server 412a5e44fd2e,37437,1732145986811; all regions closed. 2024-11-20T23:40:35,554 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,554 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,554 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,554 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,555 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741834_1010 (size=8107) 2024-11-20T23:40:35,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741834_1010 (size=8107) 2024-11-20T23:40:35,562 DEBUG [RS:0;412a5e44fd2e:37437 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/oldWALs 2024-11-20T23:40:35,562 INFO [RS:0;412a5e44fd2e:37437 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C37437%2C1732145986811.meta:.meta(num 1732145987792) 2024-11-20T23:40:35,562 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,562 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,563 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,563 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,563 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741880_1056 (size=778) 2024-11-20T23:40:35,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741880_1056 (size=778) 2024-11-20T23:40:35,567 DEBUG [RS:0;412a5e44fd2e:37437 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/oldWALs 2024-11-20T23:40:35,567 INFO [RS:0;412a5e44fd2e:37437 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C37437%2C1732145986811:(num 1732146035331) 2024-11-20T23:40:35,567 DEBUG [RS:0;412a5e44fd2e:37437 {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-11-20T23:40:35,567 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:40:35,567 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:40:35,567 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.ChoreService(370): Chore service for: regionserver/412a5e44fd2e:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-20T23:40:35,567 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:40:35,567 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T23:40:35,567 INFO [RS:0;412a5e44fd2e:37437 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37437 2024-11-20T23:40:35,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:40:35,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/412a5e44fd2e,37437,1732145986811 2024-11-20T23:40:35,572 INFO [RS:0;412a5e44fd2e:37437 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:40:35,582 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [412a5e44fd2e,37437,1732145986811] 2024-11-20T23:40:35,593 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/412a5e44fd2e,37437,1732145986811 already deleted, retry=false 2024-11-20T23:40:35,593 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 412a5e44fd2e,37437,1732145986811 expired; onlineServers=0 2024-11-20T23:40:35,593 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '412a5e44fd2e,43531,1732145986638' ***** 2024-11-20T23:40:35,593 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T23:40:35,593 INFO [M:0;412a5e44fd2e:43531 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:40:35,594 INFO [M:0;412a5e44fd2e:43531 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:40:35,594 DEBUG [M:0;412a5e44fd2e:43531 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T23:40:35,594 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T23:40:35,594 DEBUG [M:0;412a5e44fd2e:43531 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T23:40:35,594 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145987160 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732145987160,5,FailOnTimeoutGroup] 2024-11-20T23:40:35,594 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145987161 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732145987161,5,FailOnTimeoutGroup] 2024-11-20T23:40:35,594 INFO [M:0;412a5e44fd2e:43531 {}] hbase.ChoreService(370): Chore service for: master/412a5e44fd2e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T23:40:35,594 INFO [M:0;412a5e44fd2e:43531 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:40:35,595 DEBUG [M:0;412a5e44fd2e:43531 {}] master.HMaster(1795): Stopping service threads 2024-11-20T23:40:35,595 INFO [M:0;412a5e44fd2e:43531 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T23:40:35,595 INFO [M:0;412a5e44fd2e:43531 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:40:35,595 INFO [M:0;412a5e44fd2e:43531 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T23:40:35,595 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T23:40:35,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T23:40:35,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:35,604 DEBUG [M:0;412a5e44fd2e:43531 {}] zookeeper.ZKUtil(347): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T23:40:35,604 WARN [M:0;412a5e44fd2e:43531 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T23:40:35,604 INFO [M:0;412a5e44fd2e:43531 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/.lastflushedseqids 2024-11-20T23:40:35,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741881_1057 (size=228) 2024-11-20T23:40:35,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741881_1057 (size=228) 2024-11-20T23:40:35,612 INFO [M:0;412a5e44fd2e:43531 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T23:40:35,612 INFO [M:0;412a5e44fd2e:43531 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T23:40:35,612 DEBUG [M:0;412a5e44fd2e:43531 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:40:35,612 INFO [M:0;412a5e44fd2e:43531 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:40:35,612 DEBUG [M:0;412a5e44fd2e:43531 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:40:35,612 DEBUG [M:0;412a5e44fd2e:43531 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T23:40:35,612 DEBUG [M:0;412a5e44fd2e:43531 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:40:35,612 INFO [M:0;412a5e44fd2e:43531 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-11-20T23:40:35,631 DEBUG [M:0;412a5e44fd2e:43531 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/122a05c0fe6b4641b014ace62406e50d is 82, key is hbase:meta,,1/info:regioninfo/1732145987817/Put/seqid=0 2024-11-20T23:40:35,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741882_1058 (size=5672) 2024-11-20T23:40:35,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741882_1058 (size=5672) 2024-11-20T23:40:35,636 INFO [M:0;412a5e44fd2e:43531 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/122a05c0fe6b4641b014ace62406e50d 2024-11-20T23:40:35,656 DEBUG [M:0;412a5e44fd2e:43531 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2106c93ea59547979535163cfc6b87a5 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732145988323/Put/seqid=0 2024-11-20T23:40:35,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741883_1059 (size=7091) 2024-11-20T23:40:35,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741883_1059 (size=7091) 2024-11-20T23:40:35,661 INFO [M:0;412a5e44fd2e:43531 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2106c93ea59547979535163cfc6b87a5 2024-11-20T23:40:35,666 INFO [M:0;412a5e44fd2e:43531 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2106c93ea59547979535163cfc6b87a5 2024-11-20T23:40:35,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:40:35,683 INFO 
[RS:0;412a5e44fd2e:37437 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:40:35,683 INFO [RS:0;412a5e44fd2e:37437 {}] regionserver.HRegionServer(1031): Exiting; stopping=412a5e44fd2e,37437,1732145986811; zookeeper connection closed. 2024-11-20T23:40:35,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37437-0x1015a9e37e90001, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:40:35,683 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1f9aca86 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1f9aca86 2024-11-20T23:40:35,683 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T23:40:35,685 DEBUG [M:0;412a5e44fd2e:43531 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9918b3886f9a4852996a079a2b5ef180 is 69, key is 412a5e44fd2e,37437,1732145986811/rs:state/1732145987282/Put/seqid=0 2024-11-20T23:40:35,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741884_1060 (size=5156) 2024-11-20T23:40:35,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741884_1060 (size=5156) 2024-11-20T23:40:35,689 INFO [M:0;412a5e44fd2e:43531 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9918b3886f9a4852996a079a2b5ef180 2024-11-20T23:40:35,708 DEBUG [M:0;412a5e44fd2e:43531 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/add136f7fcea4730ab29ca61a7704989 is 52, key is load_balancer_on/state:d/1732145987951/Put/seqid=0 2024-11-20T23:40:35,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741885_1061 (size=5056) 2024-11-20T23:40:35,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741885_1061 (size=5056) 2024-11-20T23:40:35,713 INFO [M:0;412a5e44fd2e:43531 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/add136f7fcea4730ab29ca61a7704989 2024-11-20T23:40:35,718 DEBUG [M:0;412a5e44fd2e:43531 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/122a05c0fe6b4641b014ace62406e50d as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/122a05c0fe6b4641b014ace62406e50d 2024-11-20T23:40:35,722 INFO [M:0;412a5e44fd2e:43531 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/122a05c0fe6b4641b014ace62406e50d, entries=8, sequenceid=125, filesize=5.5 K 2024-11-20T23:40:35,723 DEBUG [M:0;412a5e44fd2e:43531 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2106c93ea59547979535163cfc6b87a5 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2106c93ea59547979535163cfc6b87a5 2024-11-20T23:40:35,727 INFO [M:0;412a5e44fd2e:43531 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2106c93ea59547979535163cfc6b87a5 2024-11-20T23:40:35,727 INFO [M:0;412a5e44fd2e:43531 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2106c93ea59547979535163cfc6b87a5, entries=13, sequenceid=125, filesize=6.9 K 2024-11-20T23:40:35,728 DEBUG [M:0;412a5e44fd2e:43531 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9918b3886f9a4852996a079a2b5ef180 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9918b3886f9a4852996a079a2b5ef180 2024-11-20T23:40:35,732 INFO [M:0;412a5e44fd2e:43531 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9918b3886f9a4852996a079a2b5ef180, entries=1, sequenceid=125, filesize=5.0 K 2024-11-20T23:40:35,733 DEBUG [M:0;412a5e44fd2e:43531 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/add136f7fcea4730ab29ca61a7704989 as hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/add136f7fcea4730ab29ca61a7704989 2024-11-20T23:40:35,737 INFO [M:0;412a5e44fd2e:43531 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46237/user/jenkins/test-data/7e3eefdc-9180-25e6-ec24-8d2e03c66b9e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/add136f7fcea4730ab29ca61a7704989, entries=1, sequenceid=125, filesize=4.9 K 2024-11-20T23:40:35,738 INFO [M:0;412a5e44fd2e:43531 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=125, compaction requested=false 2024-11-20T23:40:35,739 INFO [M:0;412a5e44fd2e:43531 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
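[Editor's note] The commit entries above ("Committing .../.tmp/info/<file> as .../info/<file>", and likewise for proc, rs and state) show each flushed store file being written under a .tmp directory and then renamed into the live column-family directory. The sketch below illustrates only that write-to-temporary-then-rename pattern with the plain Hadoop FileSystem API; it is not HBase's HRegionFileSystem code, and the paths and payload are hypothetical (only the namenode address is taken from the log).

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Namenode address as used in the log; any HDFS URI works for the sketch.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46237"), conf);

        Path tmp = new Path("/user/jenkins/demo/store/.tmp/info/flushed-file");
        Path committed = new Path("/user/jenkins/demo/store/info/flushed-file");

        // Write the new file under .tmp first so readers never observe a partial file.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.writeBytes("flushed cells would go here");
        }

        // Commit by renaming into the store directory, as in
        // "Committing .../.tmp/info/<file> as .../info/<file>".
        fs.mkdirs(committed.getParent());
        if (!fs.rename(tmp, committed)) {
            throw new java.io.IOException("Commit rename failed for " + tmp);
        }
    }
}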
2024-11-20T23:40:35,739 DEBUG [M:0;412a5e44fd2e:43531 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732146035612Disabling compacts and flushes for region at 1732146035612Disabling writes for close at 1732146035612Obtaining lock to block concurrent updates at 1732146035612Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732146035612Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1732146035613 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732146035614 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732146035614Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732146035631 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732146035631Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732146035640 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732146035656 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732146035656Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732146035666 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732146035684 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732146035684Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732146035693 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732146035707 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732146035707Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@685e1dc2: reopening flushed file at 1732146035718 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30bbf192: reopening flushed file at 1732146035723 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23e8ebbb: reopening flushed file at 1732146035727 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@736a8785: reopening flushed file at 1732146035732 (+5 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=125, compaction requested=false at 1732146035738 (+6 ms)Writing region close event to WAL at 1732146035739 (+1 ms)Closed at 1732146035739 2024-11-20T23:40:35,739 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,739 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,739 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,740 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,740 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:35,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741830_1006 (size=61332) 2024-11-20T23:40:35,742 INFO [M:0;412a5e44fd2e:43531 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T23:40:35,742 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T23:40:35,742 INFO [M:0;412a5e44fd2e:43531 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43531 2024-11-20T23:40:35,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45255 is added to blk_1073741830_1006 (size=61332) 2024-11-20T23:40:35,743 INFO [M:0;412a5e44fd2e:43531 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:40:35,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:40:35,902 INFO [M:0;412a5e44fd2e:43531 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:40:35,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43531-0x1015a9e37e90000, quorum=127.0.0.1:52619, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:40:35,907 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76a5ebe7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:40:35,907 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@268a31fc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:40:35,908 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:40:35,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1284b092{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:40:35,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41c54b7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/hadoop.log.dir/,STOPPED} 2024-11-20T23:40:35,910 WARN [BP-1136154938-172.17.0.2-1732145983999 heartbeating to localhost/127.0.0.1:46237 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:40:35,910 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T23:40:35,910 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:40:35,910 WARN [BP-1136154938-172.17.0.2-1732145983999 heartbeating to localhost/127.0.0.1:46237 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1136154938-172.17.0.2-1732145983999 (Datanode Uuid f2abca98-4ce0-47cb-8d82-cfa1df5ffe95) service to localhost/127.0.0.1:46237 2024-11-20T23:40:35,911 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/cluster_ab314d93-baee-14a4-b97f-d9f869779a1f/data/data3/current/BP-1136154938-172.17.0.2-1732145983999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:40:35,911 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/cluster_ab314d93-baee-14a4-b97f-d9f869779a1f/data/data4/current/BP-1136154938-172.17.0.2-1732145983999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:40:35,911 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:40:35,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ec16a78{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:40:35,913 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a6db152{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:40:35,913 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:40:35,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7986f193{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:40:35,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@417c1a7a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/hadoop.log.dir/,STOPPED} 2024-11-20T23:40:35,915 WARN [BP-1136154938-172.17.0.2-1732145983999 heartbeating to localhost/127.0.0.1:46237 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:40:35,915 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T23:40:35,915 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:40:35,915 WARN [BP-1136154938-172.17.0.2-1732145983999 heartbeating to localhost/127.0.0.1:46237 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1136154938-172.17.0.2-1732145983999 (Datanode Uuid bdf1ea94-15c8-474b-a5f6-fb9b49710790) service to localhost/127.0.0.1:46237 2024-11-20T23:40:35,915 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/cluster_ab314d93-baee-14a4-b97f-d9f869779a1f/data/data1/current/BP-1136154938-172.17.0.2-1732145983999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:40:35,916 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/cluster_ab314d93-baee-14a4-b97f-d9f869779a1f/data/data2/current/BP-1136154938-172.17.0.2-1732145983999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:40:35,916 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:40:35,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9612b29{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T23:40:35,921 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@277e18bc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:40:35,921 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:40:35,921 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a67ff9c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:40:35,921 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@517c2cfc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/hadoop.log.dir/,STOPPED} 2024-11-20T23:40:35,926 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T23:40:35,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T23:40:35,963 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=232 (was 207) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46237 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46237 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46237 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46237 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46237 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:46237 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46237 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46237 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 485) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=318 (was 325), ProcessCount=11 (was 11), AvailableMemoryMB=1780 (was 249) - AvailableMemoryMB LEAK? 
- 2024-11-20T23:40:35,970 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=232, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=318, ProcessCount=11, AvailableMemoryMB=1780 2024-11-20T23:40:35,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T23:40:35,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/hadoop.log.dir so I do NOT create it in target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc 2024-11-20T23:40:35,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f852282d-9318-468a-24ec-1f5594a97c2d/hadoop.tmp.dir so I do NOT create it in target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc 2024-11-20T23:40:35,970 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/cluster_b99443fa-5566-c13e-e485-84ae252a9e2f, deleteOnExit=true 2024-11-20T23:40:35,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T23:40:35,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/test.cache.data in system properties and HBase conf 2024-11-20T23:40:35,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T23:40:35,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/hadoop.log.dir in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T23:40:35,971 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/nfs.dump.dir in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/java.io.tmpdir in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T23:40:35,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T23:40:35,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T23:40:35,985 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:40:36,356 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:40:36,359 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:40:36,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:40:36,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:40:36,360 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T23:40:36,361 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:40:36,361 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@814e400{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:40:36,361 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7748f5df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:40:36,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39d5f486{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/java.io.tmpdir/jetty-localhost-35301-hadoop-hdfs-3_4_1-tests_jar-_-any-7911011414525954378/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T23:40:36,456 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@380ffe40{HTTP/1.1, (http/1.1)}{localhost:35301} 2024-11-20T23:40:36,456 INFO [Time-limited test {}] server.Server(415): Started @306656ms 2024-11-20T23:40:36,466 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T23:40:36,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:36,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:36,724 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:40:36,726 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:40:36,727 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:40:36,727 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:40:36,727 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T23:40:36,727 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1773ea07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:40:36,728 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fea8446{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:40:36,824 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@29eb302d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/java.io.tmpdir/jetty-localhost-43021-hadoop-hdfs-3_4_1-tests_jar-_-any-16090034229402950626/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:40:36,824 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40258dfd{HTTP/1.1, (http/1.1)}{localhost:43021} 2024-11-20T23:40:36,825 INFO [Time-limited test {}] server.Server(415): Started @307025ms 2024-11-20T23:40:36,825 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T23:40:36,850 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T23:40:36,853 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T23:40:36,853 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T23:40:36,854 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T23:40:36,854 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T23:40:36,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6df20715{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/hadoop.log.dir/,AVAILABLE} 2024-11-20T23:40:36,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@254f2495{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T23:40:36,953 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f4fc7f4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/java.io.tmpdir/jetty-localhost-46043-hadoop-hdfs-3_4_1-tests_jar-_-any-8146787027085522968/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:40:36,954 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10391316{HTTP/1.1, (http/1.1)}{localhost:46043} 2024-11-20T23:40:36,954 INFO [Time-limited test {}] server.Server(415): Started @307154ms 2024-11-20T23:40:36,955 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
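Note on the recurring util.RecoverLeaseFSUtils(258) warnings in this stretch of the log: they all bottom out in the same cause. The reflective isFileClosed probe is invoked after the test's DFSClient has already been shut down, so the call surfaces as an InvocationTargetException wrapping java.io.IOException: Filesystem closed, which is consistent with the Close-WAL-Writer thread running while the earlier mini-DFS cluster (port 44951) is being torn down. A minimal sketch of that reflective probe pattern is shown below; it is illustrative only, not the actual RecoverLeaseFSUtils code, and the class name IsFileClosedProbe is invented for the example.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch of a reflective isFileClosed probe; not the actual
// RecoverLeaseFSUtils implementation. isFileClosed(Path) exists only on
// DistributedFileSystem, not on the generic FileSystem API, hence the reflection.
public final class IsFileClosedProbe {

  private IsFileClosedProbe() {
  }

  /** Returns true only if the filesystem reports the file as closed. */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      // This FileSystem implementation has no isFileClosed(); caller must wait.
      return false;
    } catch (IllegalAccessException | InvocationTargetException e) {
      // e.g. the "java.io.IOException: Filesystem closed" wrapped in the
      // InvocationTargetException seen in the stack traces above.
      return false;
    }
  }
}
```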
2024-11-20T23:40:36,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:40:36,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T23:40:36,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T23:40:36,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-20T23:40:37,300 INFO [regionserver/412a5e44fd2e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:40:37,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:40:37,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T23:40:37,697 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=9, created chunk count=9, reused chunk count=72, reuseRatio=88.89% 2024-11-20T23:40:37,697 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-20T23:40:38,241 WARN [Thread-2507 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/cluster_b99443fa-5566-c13e-e485-84ae252a9e2f/data/data1/current/BP-1203127376-172.17.0.2-1732146035988/current, will proceed with Du for space computation calculation, 2024-11-20T23:40:38,241 WARN [Thread-2508 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/cluster_b99443fa-5566-c13e-e485-84ae252a9e2f/data/data2/current/BP-1203127376-172.17.0.2-1732146035988/current, will proceed with Du for space computation calculation, 2024-11-20T23:40:38,259 WARN [Thread-2471 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T23:40:38,261 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22ea357bf513397c with lease ID 0x9c9c104773fafac6: Processing first storage report for DS-f7c666ca-927a-4def-bcf3-961b42ff5237 from datanode DatanodeRegistration(127.0.0.1:33413, datanodeUuid=0c77d1e7-69b5-4406-ab8f-108cb045a18c, infoPort=41449, infoSecurePort=0, ipcPort=34363, storageInfo=lv=-57;cid=testClusterID;nsid=1754926813;c=1732146035988) 2024-11-20T23:40:38,261 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22ea357bf513397c with lease ID 0x9c9c104773fafac6: from storage DS-f7c666ca-927a-4def-bcf3-961b42ff5237 node DatanodeRegistration(127.0.0.1:33413, datanodeUuid=0c77d1e7-69b5-4406-ab8f-108cb045a18c, infoPort=41449, infoSecurePort=0, ipcPort=34363, storageInfo=lv=-57;cid=testClusterID;nsid=1754926813;c=1732146035988), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:40:38,261 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22ea357bf513397c with lease ID 0x9c9c104773fafac6: Processing first storage report for DS-ddbde617-d1a1-4c16-abd5-ab92171b6908 from datanode DatanodeRegistration(127.0.0.1:33413, datanodeUuid=0c77d1e7-69b5-4406-ab8f-108cb045a18c, infoPort=41449, infoSecurePort=0, ipcPort=34363, storageInfo=lv=-57;cid=testClusterID;nsid=1754926813;c=1732146035988) 2024-11-20T23:40:38,261 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22ea357bf513397c with lease ID 0x9c9c104773fafac6: from storage DS-ddbde617-d1a1-4c16-abd5-ab92171b6908 node DatanodeRegistration(127.0.0.1:33413, datanodeUuid=0c77d1e7-69b5-4406-ab8f-108cb045a18c, infoPort=41449, infoSecurePort=0, ipcPort=34363, storageInfo=lv=-57;cid=testClusterID;nsid=1754926813;c=1732146035988), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:40:38,365 WARN 
[Thread-2518 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/cluster_b99443fa-5566-c13e-e485-84ae252a9e2f/data/data3/current/BP-1203127376-172.17.0.2-1732146035988/current, will proceed with Du for space computation calculation, 2024-11-20T23:40:38,365 WARN [Thread-2519 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/cluster_b99443fa-5566-c13e-e485-84ae252a9e2f/data/data4/current/BP-1203127376-172.17.0.2-1732146035988/current, will proceed with Du for space computation calculation, 2024-11-20T23:40:38,385 WARN [Thread-2494 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T23:40:38,387 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe9abb23bfae3a605 with lease ID 0x9c9c104773fafac7: Processing first storage report for DS-4d23e44a-db79-4fdc-bb46-c267786bc6c0 from datanode DatanodeRegistration(127.0.0.1:39537, datanodeUuid=ded324ec-b789-4d79-8572-d0d125678bdb, infoPort=37795, infoSecurePort=0, ipcPort=46381, storageInfo=lv=-57;cid=testClusterID;nsid=1754926813;c=1732146035988) 2024-11-20T23:40:38,387 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe9abb23bfae3a605 with lease ID 0x9c9c104773fafac7: from storage DS-4d23e44a-db79-4fdc-bb46-c267786bc6c0 node DatanodeRegistration(127.0.0.1:39537, datanodeUuid=ded324ec-b789-4d79-8572-d0d125678bdb, infoPort=37795, infoSecurePort=0, ipcPort=46381, storageInfo=lv=-57;cid=testClusterID;nsid=1754926813;c=1732146035988), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:40:38,387 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe9abb23bfae3a605 with lease ID 0x9c9c104773fafac7: Processing first storage report for DS-29f18450-0f1f-4ee9-b5ac-812097e5f9a1 from datanode DatanodeRegistration(127.0.0.1:39537, datanodeUuid=ded324ec-b789-4d79-8572-d0d125678bdb, infoPort=37795, infoSecurePort=0, ipcPort=46381, storageInfo=lv=-57;cid=testClusterID;nsid=1754926813;c=1732146035988) 2024-11-20T23:40:38,387 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe9abb23bfae3a605 with lease ID 0x9c9c104773fafac7: from storage DS-29f18450-0f1f-4ee9-b5ac-812097e5f9a1 node DatanodeRegistration(127.0.0.1:39537, datanodeUuid=ded324ec-b789-4d79-8572-d0d125678bdb, infoPort=37795, infoSecurePort=0, ipcPort=46381, storageInfo=lv=-57;cid=testClusterID;nsid=1754926813;c=1732146035988), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T23:40:38,486 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc 2024-11-20T23:40:38,489 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/cluster_b99443fa-5566-c13e-e485-84ae252a9e2f/zookeeper_0, clientPort=52365, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/cluster_b99443fa-5566-c13e-e485-84ae252a9e2f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/cluster_b99443fa-5566-c13e-e485-84ae252a9e2f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T23:40:38,490 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52365 2024-11-20T23:40:38,490 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:40:38,491 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:40:38,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:40:38,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741825_1001 (size=7) 2024-11-20T23:40:38,502 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417 with version=8 2024-11-20T23:40:38,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42045/user/jenkins/test-data/432e3635-7e15-4e9e-54fd-268361730346/hbase-staging 2024-11-20T23:40:38,504 INFO [Time-limited test {}] client.ConnectionUtils(128): master/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:40:38,504 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:40:38,505 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:40:38,505 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:40:38,505 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:40:38,505 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:40:38,505 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T23:40:38,505 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:40:38,505 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40175 2024-11-20T23:40:38,506 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40175 connecting to ZooKeeper ensemble=127.0.0.1:52365 2024-11-20T23:40:38,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:38,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:38,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:401750x0, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:40:38,579 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40175-0x1015a9f02840000 connected 2024-11-20T23:40:38,662 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:40:38,664 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:40:38,667 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:40:38,667 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417, hbase.cluster.distributed=false 2024-11-20T23:40:38,668 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:40:38,669 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40175 2024-11-20T23:40:38,669 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40175 2024-11-20T23:40:38,669 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40175 2024-11-20T23:40:38,670 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40175 2024-11-20T23:40:38,670 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40175 2024-11-20T23:40:38,683 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/412a5e44fd2e:0 server-side Connection retries=45 2024-11-20T23:40:38,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:40:38,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T23:40:38,683 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T23:40:38,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T23:40:38,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T23:40:38,683 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T23:40:38,683 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T23:40:38,683 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36965 2024-11-20T23:40:38,684 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36965 connecting to ZooKeeper ensemble=127.0.0.1:52365 2024-11-20T23:40:38,685 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:40:38,686 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:40:38,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:369650x0, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T23:40:38,694 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36965-0x1015a9f02840001 connected 2024-11-20T23:40:38,694 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:40:38,694 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T23:40:38,695 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache 
enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T23:40:38,695 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T23:40:38,696 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T23:40:38,697 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36965 2024-11-20T23:40:38,697 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36965 2024-11-20T23:40:38,697 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36965 2024-11-20T23:40:38,698 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36965 2024-11-20T23:40:38,698 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36965 2024-11-20T23:40:38,708 DEBUG [M:0;412a5e44fd2e:40175 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;412a5e44fd2e:40175 2024-11-20T23:40:38,709 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/412a5e44fd2e,40175,1732146038504 2024-11-20T23:40:38,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:40:38,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:40:38,715 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/412a5e44fd2e,40175,1732146038504 2024-11-20T23:40:38,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:38,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T23:40:38,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:38,726 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T23:40:38,726 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting 
ZNode for /hbase/backup-masters/412a5e44fd2e,40175,1732146038504 from backup master directory 2024-11-20T23:40:38,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/412a5e44fd2e,40175,1732146038504 2024-11-20T23:40:38,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:40:38,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T23:40:38,736 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T23:40:38,736 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=412a5e44fd2e,40175,1732146038504 2024-11-20T23:40:38,739 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/hbase.id] with ID: 57be3efa-39ea-4fa8-9a8c-19811fc46163 2024-11-20T23:40:38,739 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/.tmp/hbase.id 2024-11-20T23:40:38,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:40:38,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741826_1002 (size=42) 2024-11-20T23:40:38,745 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/.tmp/hbase.id]:[hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/hbase.id] 2024-11-20T23:40:38,755 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:40:38,755 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T23:40:38,756 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
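Note on the becomeActiveMaster sequence recorded just above (add a backup-masters znode, watch /hbase/master, delete the backup-masters entry, register as active master): it follows the usual ephemeral-znode election pattern. The sketch below shows that pattern against a bare ZooKeeper client; it is illustrative only (HBase's ActiveMasterManager and ZKUtil add watches and retries), the class name MasterElectionSketch is invented, and it assumes the /hbase and /hbase/backup-masters parent znodes already exist, as they do in this run.

```java
import java.nio.charset.StandardCharsets;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Illustrative ephemeral-znode election sketch; not HBase's ActiveMasterManager.
public final class MasterElectionSketch {

  private MasterElectionSketch() {
  }

  /** Register under backup-masters, then race for /hbase/master; true if we won. */
  static boolean tryBecomeActive(ZooKeeper zk, String serverName)
      throws KeeperException, InterruptedException {
    byte[] data = serverName.getBytes(StandardCharsets.UTF_8);
    zk.create("/hbase/backup-masters/" + serverName, data,
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    try {
      zk.create("/hbase/master", data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    } catch (KeeperException.NodeExistsException e) {
      // Another candidate is already active; stay registered as a backup master.
      return false;
    }
    // Winner: drop the backup-masters entry, matching the
    // "Deleting ZNode ... from backup master directory" step in the log.
    zk.delete("/hbase/backup-masters/" + serverName, -1);
    return true;
  }
}
```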
2024-11-20T23:40:38,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:38,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:38,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:40:38,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741827_1003 (size=196) 2024-11-20T23:40:38,773 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T23:40:38,774 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T23:40:38,774 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:40:38,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:40:38,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741828_1004 (size=1189) 2024-11-20T23:40:38,782 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store 2024-11-20T23:40:38,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:40:38,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741829_1005 (size=34) 2024-11-20T23:40:38,789 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:40:38,789 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:40:38,789 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:40:38,789 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:40:38,789 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T23:40:38,789 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:40:38,789 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
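Note on the master:store descriptor dumped above (families info, proc, rs and state; the info family keeps 3 versions in memory with 8 KB blocks, ROW_INDEX_V1 encoding and a ROWCOL bloom filter): the real descriptor is assembled internally by MasterRegion, not by user code. Purely for reference, an equivalent of the info family could be expressed with the public client API roughly as in the sketch below; the class name MasterStoreDescriptorSketch is invented for the example.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: an 'info' family equivalent to the master:store descriptor
// logged above, expressed with the public client API.
public final class MasterStoreDescriptorSketch {

  private MasterStoreDescriptorSketch() {
  }

  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                // VERSIONS => '3'
            .setInMemory(true)                                // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                           // BLOCKSIZE => 8 KB
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        .build();
  }
}
```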
2024-11-20T23:40:38,789 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732146038789Disabling compacts and flushes for region at 1732146038789Disabling writes for close at 1732146038789Writing region close event to WAL at 1732146038789Closed at 1732146038789 2024-11-20T23:40:38,790 WARN [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/.initializing 2024-11-20T23:40:38,790 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/WALs/412a5e44fd2e,40175,1732146038504 2024-11-20T23:40:38,793 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C40175%2C1732146038504, suffix=, logDir=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/WALs/412a5e44fd2e,40175,1732146038504, archiveDir=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/oldWALs, maxLogs=10 2024-11-20T23:40:38,794 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C40175%2C1732146038504.1732146038794 2024-11-20T23:40:38,800 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/WALs/412a5e44fd2e,40175,1732146038504/412a5e44fd2e%2C40175%2C1732146038504.1732146038794 2024-11-20T23:40:38,801 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41449:41449),(127.0.0.1/127.0.0.1:37795:37795)] 2024-11-20T23:40:38,802 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:40:38,802 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:40:38,802 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:40:38,802 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:40:38,804 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:40:38,805 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T23:40:38,805 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:38,806 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:40:38,806 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:40:38,807 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T23:40:38,808 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:38,808 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:40:38,808 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:40:38,810 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T23:40:38,810 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:38,810 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:40:38,810 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:40:38,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T23:40:38,812 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:38,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T23:40:38,812 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:40:38,813 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:40:38,814 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:40:38,815 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:40:38,815 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:40:38,816 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T23:40:38,817 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T23:40:38,820 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:40:38,820 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782223, jitterRate=-0.005353152751922607}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T23:40:38,821 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732146038802Initializing all the Stores at 1732146038803 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732146038803Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732146038803Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732146038803Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732146038803Cleaning up temporary data from old regions at 1732146038815 (+12 ms)Region opened successfully at 1732146038821 (+6 ms) 2024-11-20T23:40:38,821 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T23:40:38,824 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13c2622c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:40:38,825 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T23:40:38,825 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T23:40:38,825 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T23:40:38,825 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T23:40:38,825 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T23:40:38,826 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T23:40:38,826 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T23:40:38,828 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T23:40:38,829 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T23:40:38,841 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T23:40:38,842 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T23:40:38,842 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T23:40:38,852 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T23:40:38,852 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T23:40:38,854 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T23:40:38,862 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T23:40:38,864 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T23:40:38,873 DEBUG 
[master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T23:40:38,876 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T23:40:38,883 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T23:40:38,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:40:38,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T23:40:38,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:38,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:38,895 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=412a5e44fd2e,40175,1732146038504, sessionid=0x1015a9f02840000, setting cluster-up flag (Was=false) 2024-11-20T23:40:38,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:38,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:38,946 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T23:40:38,948 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,40175,1732146038504 2024-11-20T23:40:38,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:38,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:38,999 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T23:40:39,001 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=412a5e44fd2e,40175,1732146038504 2024-11-20T23:40:39,002 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T23:40:39,007 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T23:40:39,007 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T23:40:39,007 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T23:40:39,008 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 412a5e44fd2e,40175,1732146038504 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T23:40:39,011 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:40:39,011 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:40:39,011 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:40:39,011 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=5, maxPoolSize=5 2024-11-20T23:40:39,011 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/412a5e44fd2e:0, corePoolSize=10, maxPoolSize=10 2024-11-20T23:40:39,011 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:40:39,011 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:40:39,011 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/412a5e44fd2e:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T23:40:39,017 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:40:39,017 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T23:40:39,019 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:39,019 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T23:40:39,020 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732146069020 2024-11-20T23:40:39,020 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T23:40:39,021 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T23:40:39,021 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T23:40:39,021 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T23:40:39,021 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T23:40:39,021 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T23:40:39,021 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,022 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T23:40:39,022 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T23:40:39,022 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T23:40:39,024 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T23:40:39,024 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T23:40:39,029 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732146039024,5,FailOnTimeoutGroup] 2024-11-20T23:40:39,029 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732146039029,5,FailOnTimeoutGroup] 2024-11-20T23:40:39,029 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,029 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T23:40:39,029 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,029 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
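(Editor's note) The hbase:meta table descriptor dumped just above lists its column-family attributes: ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory caching, 8 KB block size, 3 versions for the 'info' family. For reference, the same attributes can be expressed for an ordinary table with the public client builders. A minimal sketch, assuming the HBase client libraries are on the classpath; the table name "demo" is made up for illustration:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptor {
  public static void main(String[] args) {
    // Column family mirroring the 'info' family attributes printed in the log:
    // ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, 3 versions.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))            // hypothetical table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setMaxVersions(3)
            .build())
        .build();
    System.out.println(td);
  }
}
```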
2024-11-20T23:40:39,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:40:39,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741831_1007 (size=1321) 2024-11-20T23:40:39,033 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T23:40:39,033 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417 2024-11-20T23:40:39,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:40:39,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741832_1008 (size=32) 2024-11-20T23:40:39,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:40:39,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:40:39,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:40:39,049 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:39,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:40:39,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:40:39,051 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:40:39,051 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:39,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:40:39,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:40:39,053 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:40:39,053 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:39,054 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:40:39,054 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:40:39,056 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:40:39,056 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:39,057 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:40:39,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:40:39,058 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/data/hbase/meta/1588230740 2024-11-20T23:40:39,058 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/data/hbase/meta/1588230740 2024-11-20T23:40:39,059 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:40:39,059 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:40:39,060 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
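(Editor's note) The FlushLargeStoresPolicy lines above fall back to "memstore flush size / number of families" because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. A minimal sketch of setting that bound explicitly on a hypothetical table; the key is copied verbatim from the log message, while the 16 MB value and the table/family names are only examples:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushLowerBoundExample {
  public static void main(String[] args) {
    // Key taken verbatim from the log message above; the 16 MB value is an arbitrary example.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))                          // hypothetical table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16L * 1024 * 1024))
        .build();
    System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}
```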
2024-11-20T23:40:39,061 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:40:39,064 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T23:40:39,064 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712159, jitterRate=-0.09444352984428406}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:40:39,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732146039042Initializing all the Stores at 1732146039044 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732146039044Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732146039044Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732146039044Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732146039044Cleaning up temporary data from old regions at 1732146039059 (+15 ms)Region opened successfully at 1732146039064 (+5 ms) 2024-11-20T23:40:39,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T23:40:39,065 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:40:39,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:40:39,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T23:40:39,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:40:39,065 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T23:40:39,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732146039065Disabling compacts and flushes for region at 1732146039065Disabling writes for close at 1732146039065Writing region 
close event to WAL at 1732146039065Closed at 1732146039065 2024-11-20T23:40:39,066 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:40:39,067 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T23:40:39,067 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T23:40:39,069 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:40:39,070 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T23:40:39,100 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(746): ClusterId : 57be3efa-39ea-4fa8-9a8c-19811fc46163 2024-11-20T23:40:39,100 DEBUG [RS:0;412a5e44fd2e:36965 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T23:40:39,116 DEBUG [RS:0;412a5e44fd2e:36965 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T23:40:39,116 DEBUG [RS:0;412a5e44fd2e:36965 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T23:40:39,126 DEBUG [RS:0;412a5e44fd2e:36965 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T23:40:39,127 DEBUG [RS:0;412a5e44fd2e:36965 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46503829, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=412a5e44fd2e/172.17.0.2:0 2024-11-20T23:40:39,145 DEBUG [RS:0;412a5e44fd2e:36965 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;412a5e44fd2e:36965 2024-11-20T23:40:39,145 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T23:40:39,145 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T23:40:39,145 DEBUG [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(832): About to register with Master. 
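(Editor's note) An aside on the two SteppingSplitPolicy lines logged earlier: the pairs (desiredMaxFileSize=782223, jitterRate=-0.00535...) and (desiredMaxFileSize=712159, jitterRate=-0.09444...) are both consistent with desiredMaxFileSize = base + (long)(base * jitterRate) for a base of 786432 bytes in this test setup. This is only a back-of-the-envelope consistency check against the numbers in the log; the base value and the truncation are inferred, not read from any configuration or source. A tiny sketch of the arithmetic:

```java
public class JitterCheck {
  public static void main(String[] args) {
    long base = 786_432L;                        // inferred from the logged values, not from config
    double[] jitterRates = {-0.005353152751922607, -0.09444352984428406};
    long[] logged = {782_223L, 712_159L};        // desiredMaxFileSize values printed in the log
    for (int i = 0; i < jitterRates.length; i++) {
      // Truncating the jitter term toward zero reproduces the logged values for this base.
      long predicted = base + (long) (base * jitterRates[i]);
      System.out.printf("predicted=%d logged=%d%n", predicted, logged[i]);
    }
  }
}
```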
2024-11-20T23:40:39,146 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(2659): reportForDuty to master=412a5e44fd2e,40175,1732146038504 with port=36965, startcode=1732146038682 2024-11-20T23:40:39,146 DEBUG [RS:0;412a5e44fd2e:36965 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T23:40:39,153 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47479, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T23:40:39,154 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40175 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 412a5e44fd2e,36965,1732146038682 2024-11-20T23:40:39,154 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40175 {}] master.ServerManager(517): Registering regionserver=412a5e44fd2e,36965,1732146038682 2024-11-20T23:40:39,155 DEBUG [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417 2024-11-20T23:40:39,155 DEBUG [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36077 2024-11-20T23:40:39,156 DEBUG [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T23:40:39,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:40:39,168 DEBUG [RS:0;412a5e44fd2e:36965 {}] zookeeper.ZKUtil(111): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/412a5e44fd2e,36965,1732146038682 2024-11-20T23:40:39,168 WARN [RS:0;412a5e44fd2e:36965 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T23:40:39,168 INFO [RS:0;412a5e44fd2e:36965 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:40:39,168 DEBUG [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/WALs/412a5e44fd2e,36965,1732146038682 2024-11-20T23:40:39,170 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [412a5e44fd2e,36965,1732146038682] 2024-11-20T23:40:39,174 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T23:40:39,175 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T23:40:39,176 INFO [RS:0;412a5e44fd2e:36965 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T23:40:39,176 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
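(Editor's note) The registration sequence above (the region server's znode under /hbase/rs, followed by the master's "RegionServer ephemeral node created" message) rests on ZooKeeper ephemeral nodes plus child watches. A stripped-down sketch of that pattern with the plain ZooKeeper client; the connect string, paths, and payload are placeholders, not HBase's actual wiring:

```java
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRegistrationSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Placeholder quorum; the log's 127.0.0.1:52365 is a throwaway test port.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    // Make sure the (placeholder) parent path exists.
    for (String path : new String[] {"/demo", "/demo/rs"}) {
      if (zk.exists(path, false) == null) {
        zk.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
      }
    }

    // "Region server" side: publish presence as an ephemeral znode; it vanishes with the
    // session, which is what lets a tracker notice crashed servers.
    String member = zk.create("/demo/rs/server-1", "up".getBytes(StandardCharsets.UTF_8),
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // "Tracker" side: list children and leave a watch that fires on membership changes,
    // analogous to the NodeChildrenChanged events on /hbase/rs seen in the log.
    List<String> members = zk.getChildren("/demo/rs",
        event -> System.out.println("membership changed under " + event.getPath()));
    System.out.println("registered " + member + "; members now: " + members);
    zk.close();
  }
}
```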
2024-11-20T23:40:39,176 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T23:40:39,177 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T23:40:39,177 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/412a5e44fd2e:0, corePoolSize=2, maxPoolSize=2 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/412a5e44fd2e:0, corePoolSize=1, maxPoolSize=1 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:40:39,177 DEBUG [RS:0;412a5e44fd2e:36965 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/412a5e44fd2e:0, corePoolSize=3, maxPoolSize=3 2024-11-20T23:40:39,180 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
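(Editor's note) Each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" line above is an event-handler pool being sized. As a plain-JDK analogy (not the HBase ExecutorService class itself), the same core/max semantics look like the sketch below; the pool name and sizes are taken from the RS_OPEN_REGION line purely as an example, and the core-thread timeout mirrors the allowCoreThreadTimeOut=true noted earlier for the RemoteProcedureDispatcher pool:

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class HandlerPoolSketch {
  public static void main(String[] args) throws InterruptedException {
    // corePoolSize=1, maxPoolSize=1 mirrors the RS_OPEN_REGION executor logged above;
    // with an unbounded queue the pool never grows past the core size anyway.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
        r -> new Thread(r, "RS_OPEN_REGION-sketch"));
    pool.allowCoreThreadTimeOut(true);   // let idle core threads exit

    pool.execute(() -> System.out.println("handling an open-region event (placeholder work)"));
    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}
```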
2024-11-20T23:40:39,180 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,180 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,180 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,180 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,180 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,36965,1732146038682-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:40:39,197 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T23:40:39,198 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,36965,1732146038682-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,198 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,198 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.Replication(171): 412a5e44fd2e,36965,1732146038682 started 2024-11-20T23:40:39,214 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,214 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(1482): Serving as 412a5e44fd2e,36965,1732146038682, RpcServer on 412a5e44fd2e/172.17.0.2:36965, sessionid=0x1015a9f02840001 2024-11-20T23:40:39,214 DEBUG [RS:0;412a5e44fd2e:36965 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T23:40:39,214 DEBUG [RS:0;412a5e44fd2e:36965 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 412a5e44fd2e,36965,1732146038682 2024-11-20T23:40:39,214 DEBUG [RS:0;412a5e44fd2e:36965 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,36965,1732146038682' 2024-11-20T23:40:39,214 DEBUG [RS:0;412a5e44fd2e:36965 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T23:40:39,215 DEBUG [RS:0;412a5e44fd2e:36965 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T23:40:39,215 DEBUG [RS:0;412a5e44fd2e:36965 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T23:40:39,215 DEBUG [RS:0;412a5e44fd2e:36965 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T23:40:39,215 DEBUG [RS:0;412a5e44fd2e:36965 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 412a5e44fd2e,36965,1732146038682 2024-11-20T23:40:39,215 DEBUG [RS:0;412a5e44fd2e:36965 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '412a5e44fd2e,36965,1732146038682' 2024-11-20T23:40:39,215 DEBUG [RS:0;412a5e44fd2e:36965 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T23:40:39,216 DEBUG 
[RS:0;412a5e44fd2e:36965 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T23:40:39,216 DEBUG [RS:0;412a5e44fd2e:36965 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T23:40:39,216 INFO [RS:0;412a5e44fd2e:36965 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T23:40:39,216 INFO [RS:0;412a5e44fd2e:36965 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T23:40:39,220 WARN [412a5e44fd2e:40175 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-20T23:40:39,318 INFO [RS:0;412a5e44fd2e:36965 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C36965%2C1732146038682, suffix=, logDir=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/WALs/412a5e44fd2e,36965,1732146038682, archiveDir=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/oldWALs, maxLogs=32 2024-11-20T23:40:39,319 INFO [RS:0;412a5e44fd2e:36965 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C36965%2C1732146038682.1732146039318 2024-11-20T23:40:39,325 INFO [RS:0;412a5e44fd2e:36965 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/WALs/412a5e44fd2e,36965,1732146038682/412a5e44fd2e%2C36965%2C1732146038682.1732146039318 2024-11-20T23:40:39,331 DEBUG [RS:0;412a5e44fd2e:36965 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41449:41449),(127.0.0.1/127.0.0.1:37795:37795)] 2024-11-20T23:40:39,470 DEBUG [412a5e44fd2e:40175 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T23:40:39,471 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=412a5e44fd2e,36965,1732146038682 2024-11-20T23:40:39,473 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,36965,1732146038682, state=OPENING 2024-11-20T23:40:39,515 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T23:40:39,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:39,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:39,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:39,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:39,559 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:40:39,559 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:40:39,559 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T23:40:39,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,36965,1732146038682}] 2024-11-20T23:40:39,712 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T23:40:39,717 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45747, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T23:40:39,728 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T23:40:39,729 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:40:39,738 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=412a5e44fd2e%2C36965%2C1732146038682.meta, suffix=.meta, logDir=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/WALs/412a5e44fd2e,36965,1732146038682, archiveDir=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/oldWALs, maxLogs=32 2024-11-20T23:40:39,738 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 412a5e44fd2e%2C36965%2C1732146038682.meta.1732146039738.meta 2024-11-20T23:40:39,775 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/WALs/412a5e44fd2e,36965,1732146038682/412a5e44fd2e%2C36965%2C1732146038682.meta.1732146039738.meta 2024-11-20T23:40:39,794 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37795:37795),(127.0.0.1/127.0.0.1:41449:41449)] 2024-11-20T23:40:39,804 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T23:40:39,805 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T23:40:39,805 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T23:40:39,805 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T23:40:39,805 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T23:40:39,806 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T23:40:39,806 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T23:40:39,806 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T23:40:39,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T23:40:39,813 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T23:40:39,813 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
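(Editor's note) The meta open above loads org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from the table descriptor ("Loaded coprocessor ... from HTD of hbase:meta successfully"). Attaching a coprocessor to an ordinary table descriptor uses the same mechanism; a minimal sketch, with a hypothetical table name and the coprocessor class name copied from the log:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CoprocessorOnDescriptor {
  public static void main(String[] args) throws Exception {
    // Coprocessor class name copied from the log line above; "demo" is a made-up table.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    System.out.println(
        td.hasCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint"));
  }
}
```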
2024-11-20T23:40:39,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:40:39,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T23:40:39,815 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T23:40:39,815 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:39,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:40:39,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T23:40:39,817 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T23:40:39,817 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:39,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:40:39,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T23:40:39,818 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T23:40:39,819 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T23:40:39,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T23:40:39,819 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T23:40:39,820 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/data/hbase/meta/1588230740 2024-11-20T23:40:39,821 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/data/hbase/meta/1588230740 2024-11-20T23:40:39,823 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T23:40:39,823 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T23:40:39,824 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
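The four stores opened above (info, ns, rep_barrier and table) are hbase:meta's column families, and the 16.0 MB figure in the last entry is exactly what the message states: the region's memstore flush heap size divided by those four families. The per-family attributes echoed in the region open journal below (DATA_BLOCK_ENCODING, BLOOMFILTER, BLOCKSIZE, ...) can be read back through the standard client API; a minimal sketch, assuming a reachable cluster configuration on the classpath:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MetaFamiliesSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Lists info, ns, rep_barrier and table with their encoding/bloom/blocksize settings.
      for (ColumnFamilyDescriptor cf : admin.getDescriptor(TableName.META_TABLE_NAME).getColumnFamilies()) {
        System.out.println(cf.getNameAsString() + " -> " + cf);
      }
    }
  }
}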
2024-11-20T23:40:39,825 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T23:40:39,827 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802259, jitterRate=0.02012556791305542}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T23:40:39,827 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T23:40:39,827 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732146039806Writing region info on filesystem at 1732146039806Initializing all the Stores at 1732146039811 (+5 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732146039811Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732146039811Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732146039812 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732146039812Cleaning up temporary data from old regions at 1732146039823 (+11 ms)Running coprocessor post-open hooks at 1732146039827 (+4 ms)Region opened successfully at 1732146039827 2024-11-20T23:40:39,829 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732146039712 2024-11-20T23:40:39,831 DEBUG [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T23:40:39,831 INFO [RS_OPEN_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T23:40:39,832 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=412a5e44fd2e,36965,1732146038682 2024-11-20T23:40:39,833 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 412a5e44fd2e,36965,1732146038682, state=OPEN 2024-11-20T23:40:39,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:40:39,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T23:40:39,914 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=412a5e44fd2e,36965,1732146038682 2024-11-20T23:40:39,914 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:40:39,914 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T23:40:39,917 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T23:40:39,917 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=412a5e44fd2e,36965,1732146038682 in 355 msec 2024-11-20T23:40:39,919 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T23:40:39,919 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 850 msec 2024-11-20T23:40:39,920 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T23:40:39,920 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T23:40:39,921 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:40:39,921 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,36965,1732146038682, seqNum=-1] 2024-11-20T23:40:39,922 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:40:39,923 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35365, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:40:39,928 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 924 msec 2024-11-20T23:40:39,929 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732146039929, completionTime=-1 2024-11-20T23:40:39,929 INFO 
[master/412a5e44fd2e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T23:40:39,929 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T23:40:39,931 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T23:40:39,931 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732146099931 2024-11-20T23:40:39,931 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732146159931 2024-11-20T23:40:39,931 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-20T23:40:39,931 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,40175,1732146038504-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,931 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,40175,1732146038504-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,931 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,40175,1732146038504-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,932 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-412a5e44fd2e:40175, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,932 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,932 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:39,934 DEBUG [master/412a5e44fd2e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T23:40:39,937 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.201sec 2024-11-20T23:40:39,937 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T23:40:39,937 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T23:40:39,937 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T23:40:39,937 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
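The run of "Chore ScheduledChore name=... is enabled" entries above is the master registering its periodic maintenance tasks with its ChoreService. A bare-bones sketch of the same mechanism with a custom chore (the name, period and stopper here are purely illustrative):

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws Exception {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");
    // chore() fires every 60s, the same period the log shows for ClusterStatusChore.
    service.scheduleChore(new ScheduledChore("DemoChore", stopper, 60_000) {
      @Override protected void chore() { System.out.println("periodic work"); }
    });
    Thread.sleep(5_000);  // let it run briefly before tearing the pool down
    service.shutdown();
  }
}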
2024-11-20T23:40:39,937 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T23:40:39,937 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,40175,1732146038504-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T23:40:39,938 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,40175,1732146038504-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T23:40:39,940 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T23:40:39,940 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T23:40:39,940 INFO [master/412a5e44fd2e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=412a5e44fd2e,40175,1732146038504-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T23:40:40,000 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a4beda4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:40:40,001 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 412a5e44fd2e,40175,-1 for getting cluster id 2024-11-20T23:40:40,001 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T23:40:40,002 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '57be3efa-39ea-4fa8-9a8c-19811fc46163' 2024-11-20T23:40:40,003 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T23:40:40,003 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "57be3efa-39ea-4fa8-9a8c-19811fc46163" 2024-11-20T23:40:40,003 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@569b041, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:40:40,003 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [412a5e44fd2e,40175,-1] 2024-11-20T23:40:40,004 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T23:40:40,004 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:40:40,005 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42386, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T23:40:40,006 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59176bc1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T23:40:40,007 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T23:40:40,008 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=412a5e44fd2e,36965,1732146038682, seqNum=-1] 2024-11-20T23:40:40,008 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T23:40:40,010 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51742, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T23:40:40,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=412a5e44fd2e,40175,1732146038504 2024-11-20T23:40:40,012 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T23:40:40,015 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T23:40:40,015 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T23:40:40,017 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/WALs/test.com,8080,1, archiveDir=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/oldWALs, maxLogs=32 2024-11-20T23:40:40,018 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732146040018 2024-11-20T23:40:40,025 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/WALs/test.com,8080,1/test.com%2C8080%2C1.1732146040018 2024-11-20T23:40:40,036 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37795:37795),(127.0.0.1/127.0.0.1:41449:41449)] 2024-11-20T23:40:40,037 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732146040037 2024-11-20T23:40:40,048 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,048 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,048 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,051 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,051 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,051 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/WALs/test.com,8080,1/test.com%2C8080%2C1.1732146040018 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/WALs/test.com,8080,1/test.com%2C8080%2C1.1732146040037 2024-11-20T23:40:40,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741835_1011 (size=93) 2024-11-20T23:40:40,053 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741835_1011 (size=93) 2024-11-20T23:40:40,057 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41449:41449),(127.0.0.1/127.0.0.1:37795:37795)] 2024-11-20T23:40:40,060 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/WALs/test.com,8080,1/test.com%2C8080%2C1.1732146040018 to hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/oldWALs/test.com%2C8080%2C1.1732146040018 2024-11-20T23:40:40,061 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,061 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,061 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,061 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,061 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741836_1012 (size=93) 2024-11-20T23:40:40,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741836_1012 (size=93) 2024-11-20T23:40:40,074 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/oldWALs 2024-11-20T23:40:40,074 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732146040037) 2024-11-20T23:40:40,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T23:40:40,075 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
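The sequence just above is a manual WAL roll on the test's standalone FSHLog: a second writer (...040037) is created, the empty first file (entries=0, 85 B) is closed and archived to oldWALs, and the WAL is then closed. A comparable roll-and-archive can be requested against a live region server through the Admin API; a hedged sketch (a different code path from the test's standalone WAL, with connection setup assumed):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask each region server to roll its WAL; rolled files with no entries end up in oldWALs,
      // much like the "Rolled WAL ... with entries=0" / "Archiving ..." lines above.
      for (ServerName rs : admin.getRegionServers()) {
        admin.rollWALWriter(rs);
      }
    }
  }
}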
2024-11-20T23:40:40,075 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:40:40,075 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:40:40,075 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:40:40,075 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
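The call stack above is the test's tearDown path: AbstractTestLogRolling closes its cluster connection and shuts the mini cluster down through HBaseTestingUtil. A stripped-down sketch of that lifecycle, assuming the 3.x HBaseTestingUtil keeps the usual no-arg constructor and a startMiniCluster() counterpart (only shutdownMiniCluster appears in the trace itself):

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();        // one master + one region server, as in this log
    try {
      // ... create tables/WALs and exercise log rolling here ...
    } finally {
      util.shutdownMiniCluster();   // produces the "Shutting down minicluster" sequence above
    }
  }
}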
2024-11-20T23:40:40,075 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T23:40:40,075 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=602077431, stopped=false 2024-11-20T23:40:40,075 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=412a5e44fd2e,40175,1732146038504 2024-11-20T23:40:40,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:40:40,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T23:40:40,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:40,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:40,094 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:40:40,094 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T23:40:40,094 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:40:40,094 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:40:40,094 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:40:40,094 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T23:40:40,094 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '412a5e44fd2e,36965,1732146038682' ***** 2024-11-20T23:40:40,095 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T23:40:40,096 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T23:40:40,096 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T23:40:40,096 INFO [RS:0;412a5e44fd2e:36965 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T23:40:40,096 INFO [RS:0;412a5e44fd2e:36965 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T23:40:40,096 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(959): stopping server 412a5e44fd2e,36965,1732146038682 2024-11-20T23:40:40,096 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:40:40,096 INFO [RS:0;412a5e44fd2e:36965 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;412a5e44fd2e:36965. 
2024-11-20T23:40:40,096 DEBUG [RS:0;412a5e44fd2e:36965 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T23:40:40,096 DEBUG [RS:0;412a5e44fd2e:36965 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:40:40,096 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T23:40:40,096 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T23:40:40,096 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-20T23:40:40,096 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T23:40:40,097 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-20T23:40:40,097 DEBUG [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-20T23:40:40,097 DEBUG [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-20T23:40:40,097 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T23:40:40,097 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T23:40:40,097 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T23:40:40,097 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T23:40:40,097 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T23:40:40,097 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-20T23:40:40,116 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/data/hbase/meta/1588230740/.tmp/ns/448d360d52ff4195a4b8f745b9846fdb is 43, key is default/ns:d/1732146039923/Put/seqid=0 2024-11-20T23:40:40,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741837_1013 (size=5153) 2024-11-20T23:40:40,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741837_1013 (size=5153) 2024-11-20T23:40:40,217 INFO [regionserver/412a5e44fd2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T23:40:40,217 INFO [regionserver/412a5e44fd2e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T23:40:40,297 DEBUG [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-20T23:40:40,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,497 DEBUG [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-20T23:40:40,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,32927,1732145843473/412a5e44fd2e%2C32927%2C1732145843473.1732145843714 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:40,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44951/user/jenkins/test-data/9d1248b8-5367-99c2-5e30-949dfc9db0a1/WALs/412a5e44fd2e,40151,1732145842087/412a5e44fd2e%2C40151%2C1732145842087.meta.1732145843179.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T23:40:40,527 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/data/hbase/meta/1588230740/.tmp/ns/448d360d52ff4195a4b8f745b9846fdb 2024-11-20T23:40:40,534 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/data/hbase/meta/1588230740/.tmp/ns/448d360d52ff4195a4b8f745b9846fdb as hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/data/hbase/meta/1588230740/ns/448d360d52ff4195a4b8f745b9846fdb 2024-11-20T23:40:40,539 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/data/hbase/meta/1588230740/ns/448d360d52ff4195a4b8f745b9846fdb, entries=2, sequenceid=6, filesize=5.0 K 2024-11-20T23:40:40,540 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 443ms, sequenceid=6, compaction requested=false 2024-11-20T23:40:40,543 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T23:40:40,544 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] 
coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T23:40:40,544 INFO [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T23:40:40,544 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732146040097Running coprocessor pre-close hooks at 1732146040097Disabling compacts and flushes for region at 1732146040097Disabling writes for close at 1732146040097Obtaining lock to block concurrent updates at 1732146040097Preparing flush snapshotting stores in 1588230740 at 1732146040097Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732146040097Flushing stores of hbase:meta,,1.1588230740 at 1732146040098 (+1 ms)Flushing 1588230740/ns: creating writer at 1732146040098Flushing 1588230740/ns: appending metadata at 1732146040115 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732146040115Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5087c5b3: reopening flushed file at 1732146040533 (+418 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 443ms, sequenceid=6, compaction requested=false at 1732146040540 (+7 ms)Writing region close event to WAL at 1732146040540Running coprocessor post-close hooks at 1732146040543 (+3 ms)Closed at 1732146040544 (+1 ms) 2024-11-20T23:40:40,544 DEBUG [RS_CLOSE_META-regionserver/412a5e44fd2e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T23:40:40,697 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(976): stopping server 412a5e44fd2e,36965,1732146038682; all regions closed. 
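Because the region server is stopping, closing hbase:meta forces a flush of its pending 74 bytes: the ns edits are written to a .tmp HFile, committed into the ns family directory, and a recovered.edits/9.seqid marker records the new max sequence id. Outside of a close, a flush of the same store-flush machinery can be triggered on demand from a client; a minimal sketch (an illustrative trigger, not the close-time flush itself, with configuration/connection assumed):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushMetaSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush hbase:meta's memstores to HFiles on demand.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}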
2024-11-20T23:40:40,698 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,698 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,698 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,698 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,698 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741834_1010 (size=1152) 2024-11-20T23:40:40,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741834_1010 (size=1152) 2024-11-20T23:40:40,702 DEBUG [RS:0;412a5e44fd2e:36965 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/oldWALs 2024-11-20T23:40:40,703 INFO [RS:0;412a5e44fd2e:36965 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C36965%2C1732146038682.meta:.meta(num 1732146039738) 2024-11-20T23:40:40,703 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,703 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,704 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,704 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,704 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741833_1009 (size=93) 2024-11-20T23:40:40,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741833_1009 (size=93) 2024-11-20T23:40:40,708 DEBUG [RS:0;412a5e44fd2e:36965 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/oldWALs 2024-11-20T23:40:40,708 INFO [RS:0;412a5e44fd2e:36965 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 412a5e44fd2e%2C36965%2C1732146038682:(num 1732146039318) 2024-11-20T23:40:40,708 DEBUG [RS:0;412a5e44fd2e:36965 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T23:40:40,708 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T23:40:40,708 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:40:40,708 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.ChoreService(370): Chore service for: regionserver/412a5e44fd2e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T23:40:40,708 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:40:40,709 INFO [regionserver/412a5e44fd2e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T23:40:40,709 INFO [RS:0;412a5e44fd2e:36965 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36965 2024-11-20T23:40:40,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T23:40:40,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/412a5e44fd2e,36965,1732146038682 2024-11-20T23:40:40,725 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:40:40,726 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [412a5e44fd2e,36965,1732146038682] 2024-11-20T23:40:40,767 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/412a5e44fd2e,36965,1732146038682 already deleted, retry=false 2024-11-20T23:40:40,767 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 412a5e44fd2e,36965,1732146038682 expired; onlineServers=0 2024-11-20T23:40:40,767 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '412a5e44fd2e,40175,1732146038504' ***** 2024-11-20T23:40:40,767 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T23:40:40,767 INFO [M:0;412a5e44fd2e:40175 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T23:40:40,767 INFO [M:0;412a5e44fd2e:40175 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T23:40:40,768 DEBUG [M:0;412a5e44fd2e:40175 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T23:40:40,768 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T23:40:40,768 DEBUG [M:0;412a5e44fd2e:40175 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T23:40:40,768 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732146039029 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.small.0-1732146039029,5,FailOnTimeoutGroup] 2024-11-20T23:40:40,768 DEBUG [master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732146039024 {}] cleaner.HFileCleaner(306): Exit Thread[master/412a5e44fd2e:0:becomeActiveMaster-HFileCleaner.large.0-1732146039024,5,FailOnTimeoutGroup] 2024-11-20T23:40:40,768 INFO [M:0;412a5e44fd2e:40175 {}] hbase.ChoreService(370): Chore service for: master/412a5e44fd2e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T23:40:40,768 INFO [M:0;412a5e44fd2e:40175 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T23:40:40,768 DEBUG [M:0;412a5e44fd2e:40175 {}] master.HMaster(1795): Stopping service threads 2024-11-20T23:40:40,768 INFO [M:0;412a5e44fd2e:40175 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T23:40:40,768 INFO [M:0;412a5e44fd2e:40175 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T23:40:40,768 INFO [M:0;412a5e44fd2e:40175 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T23:40:40,768 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T23:40:40,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T23:40:40,832 DEBUG [M:0;412a5e44fd2e:40175 {}] zookeeper.ZKUtil(347): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T23:40:40,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T23:40:40,832 WARN [M:0;412a5e44fd2e:40175 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T23:40:40,832 INFO [M:0;412a5e44fd2e:40175 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/.lastflushedseqids 2024-11-20T23:40:40,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:40:40,836 INFO [RS:0;412a5e44fd2e:36965 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:40:40,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36965-0x1015a9f02840001, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:40:40,836 INFO [RS:0;412a5e44fd2e:36965 {}] regionserver.HRegionServer(1031): Exiting; stopping=412a5e44fd2e,36965,1732146038682; zookeeper connection closed. 
2024-11-20T23:40:40,837 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@31e06738 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@31e06738 2024-11-20T23:40:40,837 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T23:40:40,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741838_1014 (size=99) 2024-11-20T23:40:40,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741838_1014 (size=99) 2024-11-20T23:40:40,846 INFO [M:0;412a5e44fd2e:40175 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T23:40:40,846 INFO [M:0;412a5e44fd2e:40175 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T23:40:40,846 DEBUG [M:0;412a5e44fd2e:40175 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T23:40:40,846 INFO [M:0;412a5e44fd2e:40175 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:40:40,846 DEBUG [M:0;412a5e44fd2e:40175 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:40:40,846 DEBUG [M:0;412a5e44fd2e:40175 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T23:40:40,846 DEBUG [M:0;412a5e44fd2e:40175 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T23:40:40,847 INFO [M:0;412a5e44fd2e:40175 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-20T23:40:40,861 DEBUG [M:0;412a5e44fd2e:40175 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e4a9ec5a717746e996e38adb242f5200 is 82, key is hbase:meta,,1/info:regioninfo/1732146039832/Put/seqid=0 2024-11-20T23:40:40,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741839_1015 (size=5672) 2024-11-20T23:40:40,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741839_1015 (size=5672) 2024-11-20T23:40:40,866 INFO [M:0;412a5e44fd2e:40175 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e4a9ec5a717746e996e38adb242f5200 2024-11-20T23:40:40,883 DEBUG [M:0;412a5e44fd2e:40175 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ba5d278940db4bee89b769617e852b7b is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732146039928/Put/seqid=0 2024-11-20T23:40:40,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741840_1016 (size=5275) 2024-11-20T23:40:40,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741840_1016 (size=5275) 2024-11-20T23:40:40,887 INFO [M:0;412a5e44fd2e:40175 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ba5d278940db4bee89b769617e852b7b 2024-11-20T23:40:40,904 DEBUG [M:0;412a5e44fd2e:40175 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1115641792af409380a06a5a0f1ea221 is 69, key is 412a5e44fd2e,36965,1732146038682/rs:state/1732146039154/Put/seqid=0 2024-11-20T23:40:40,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741841_1017 (size=5156) 2024-11-20T23:40:40,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741841_1017 (size=5156) 2024-11-20T23:40:40,909 INFO [M:0;412a5e44fd2e:40175 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1115641792af409380a06a5a0f1ea221 2024-11-20T23:40:40,921 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T23:40:40,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,925 DEBUG [M:0;412a5e44fd2e:40175 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ee7274a71157446895c5e77031dd3a34 is 52, key is load_balancer_on/state:d/1732146040014/Put/seqid=0 2024-11-20T23:40:40,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741842_1018 (size=5056) 2024-11-20T23:40:40,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741842_1018 (size=5056) 2024-11-20T23:40:40,930 INFO [M:0;412a5e44fd2e:40175 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ee7274a71157446895c5e77031dd3a34 2024-11-20T23:40:40,934 DEBUG [M:0;412a5e44fd2e:40175 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e4a9ec5a717746e996e38adb242f5200 as hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e4a9ec5a717746e996e38adb242f5200 2024-11-20T23:40:40,938 INFO [M:0;412a5e44fd2e:40175 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e4a9ec5a717746e996e38adb242f5200, entries=8, sequenceid=29, filesize=5.5 K 2024-11-20T23:40:40,939 DEBUG [M:0;412a5e44fd2e:40175 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ba5d278940db4bee89b769617e852b7b as hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ba5d278940db4bee89b769617e852b7b 2024-11-20T23:40:40,943 INFO [M:0;412a5e44fd2e:40175 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ba5d278940db4bee89b769617e852b7b, entries=3, sequenceid=29, filesize=5.2 K 2024-11-20T23:40:40,944 DEBUG [M:0;412a5e44fd2e:40175 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1115641792af409380a06a5a0f1ea221 as hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1115641792af409380a06a5a0f1ea221 2024-11-20T23:40:40,948 INFO [M:0;412a5e44fd2e:40175 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1115641792af409380a06a5a0f1ea221, 
entries=1, sequenceid=29, filesize=5.0 K 2024-11-20T23:40:40,949 DEBUG [M:0;412a5e44fd2e:40175 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ee7274a71157446895c5e77031dd3a34 as hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ee7274a71157446895c5e77031dd3a34 2024-11-20T23:40:40,953 INFO [M:0;412a5e44fd2e:40175 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36077/user/jenkins/test-data/fd7465b3-5a44-43ad-07e7-13422d375417/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ee7274a71157446895c5e77031dd3a34, entries=1, sequenceid=29, filesize=4.9 K 2024-11-20T23:40:40,954 INFO [M:0;412a5e44fd2e:40175 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=29, compaction requested=false 2024-11-20T23:40:40,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,955 INFO [M:0;412a5e44fd2e:40175 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T23:40:40,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,955 DEBUG [M:0;412a5e44fd2e:40175 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732146040846Disabling compacts and flushes for region at 1732146040846Disabling writes for close at 1732146040846Obtaining lock to block concurrent updates at 1732146040847 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732146040847Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732146040847Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732146040847Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732146040847Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732146040860 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732146040860Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732146040869 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732146040882 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732146040882Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732146040891 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732146040903 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732146040903Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732146040912 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732146040924 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732146040925 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f6b09b3: reopening flushed file at 1732146040933 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f88293a: reopening flushed file at 1732146040939 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37243d84: reopening flushed file at 1732146040943 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c9e6f0b: reopening flushed file at 1732146040948 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=29, compaction requested=false at 1732146040954 (+6 ms)Writing region close event to WAL at 1732146040955 (+1 ms)Closed at 1732146040955 2024-11-20T23:40:40,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,955 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,955 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,956 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,956 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,956 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T23:40:40,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39537 is added to blk_1073741830_1006 (size=10311) 2024-11-20T23:40:40,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33413 is added to blk_1073741830_1006 (size=10311) 2024-11-20T23:40:40,958 INFO [M:0;412a5e44fd2e:40175 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T23:40:40,958 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T23:40:40,958 INFO [M:0;412a5e44fd2e:40175 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40175 2024-11-20T23:40:40,958 INFO [M:0;412a5e44fd2e:40175 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T23:40:40,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:40,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T23:40:41,112 INFO [M:0;412a5e44fd2e:40175 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T23:40:41,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:40:41,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40175-0x1015a9f02840000, quorum=127.0.0.1:52365, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T23:40:41,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f4fc7f4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:40:41,114 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10391316{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:40:41,114 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:40:41,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@254f2495{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:40:41,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6df20715{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/hadoop.log.dir/,STOPPED} 2024-11-20T23:40:41,115 WARN [BP-1203127376-172.17.0.2-1732146035988 heartbeating to localhost/127.0.0.1:36077 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:40:41,115 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T23:40:41,115 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:40:41,115 WARN [BP-1203127376-172.17.0.2-1732146035988 heartbeating to localhost/127.0.0.1:36077 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1203127376-172.17.0.2-1732146035988 (Datanode Uuid ded324ec-b789-4d79-8572-d0d125678bdb) service to localhost/127.0.0.1:36077 2024-11-20T23:40:41,116 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/cluster_b99443fa-5566-c13e-e485-84ae252a9e2f/data/data3/current/BP-1203127376-172.17.0.2-1732146035988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:40:41,116 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/cluster_b99443fa-5566-c13e-e485-84ae252a9e2f/data/data4/current/BP-1203127376-172.17.0.2-1732146035988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:40:41,116 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:40:41,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@29eb302d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T23:40:41,119 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40258dfd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:40:41,119 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:40:41,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fea8446{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:40:41,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1773ea07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/hadoop.log.dir/,STOPPED} 2024-11-20T23:40:41,121 WARN [BP-1203127376-172.17.0.2-1732146035988 heartbeating to localhost/127.0.0.1:36077 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T23:40:41,121 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T23:40:41,121 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T23:40:41,121 WARN [BP-1203127376-172.17.0.2-1732146035988 heartbeating to localhost/127.0.0.1:36077 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1203127376-172.17.0.2-1732146035988 (Datanode Uuid 0c77d1e7-69b5-4406-ab8f-108cb045a18c) service to localhost/127.0.0.1:36077 2024-11-20T23:40:41,121 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/cluster_b99443fa-5566-c13e-e485-84ae252a9e2f/data/data1/current/BP-1203127376-172.17.0.2-1732146035988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:40:41,121 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/cluster_b99443fa-5566-c13e-e485-84ae252a9e2f/data/data2/current/BP-1203127376-172.17.0.2-1732146035988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T23:40:41,122 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T23:40:41,126 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39d5f486{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T23:40:41,127 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@380ffe40{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T23:40:41,127 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T23:40:41,127 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7748f5df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T23:40:41,127 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@814e400{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f77283e-ad78-1224-72ce-a9de8cad1ecc/hadoop.log.dir/,STOPPED} 2024-11-20T23:40:41,133 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T23:40:41,148 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T23:40:41,157 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=270 (was 232) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36077 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36077 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36077 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36077 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36077 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36077 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:36077 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36077 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=532 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=333 (was 318) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1705 (was 1780)